Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * ARM generic helpers. | |
3 | * | |
4 | * This code is licensed under the GNU GPL v2 or later. | |
5 | * | |
6 | * SPDX-License-Identifier: GPL-2.0-or-later | |
7 | */ | |
8 | ||
9 | #include "qemu/osdep.h" | |
10 | #include "qemu/log.h" | |
11 | #include "trace.h" | |
12 | #include "cpu.h" | |
13 | #include "internals.h" | |
14 | #include "exec/helper-proto.h" | |
15 | #include "qemu/main-loop.h" | |
16 | #include "qemu/timer.h" | |
17 | #include "qemu/bitops.h" | |
18 | #include "qemu/crc32c.h" | |
19 | #include "qemu/qemu-print.h" | |
20 | #include "exec/exec-all.h" | |
21 | #include <zlib.h> /* For crc32 */ | |
22 | #include "hw/irq.h" | |
23 | #include "sysemu/cpu-timers.h" | |
24 | #include "sysemu/kvm.h" | |
25 | #include "sysemu/tcg.h" | |
26 | #include "qapi/error.h" | |
27 | #include "qemu/guest-random.h" | |
28 | #ifdef CONFIG_TCG | |
29 | #include "semihosting/common-semi.h" | |
30 | #endif | |
31 | #include "cpregs.h" | |
32 | ||
33 | #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ | |
34 | ||
35 | static void switch_mode(CPUARMState *env, int mode); | |
36 | ||
37 | static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
38 | { | |
39 | assert(ri->fieldoffset); | |
40 | if (cpreg_field_is_64bit(ri)) { | |
41 | return CPREG_FIELD64(env, ri); | |
42 | } else { | |
43 | return CPREG_FIELD32(env, ri); | |
44 | } | |
45 | } | |
46 | ||
47 | void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) | |
48 | { | |
49 | assert(ri->fieldoffset); | |
50 | if (cpreg_field_is_64bit(ri)) { | |
51 | CPREG_FIELD64(env, ri) = value; | |
52 | } else { | |
53 | CPREG_FIELD32(env, ri) = value; | |
54 | } | |
55 | } | |
56 | ||
57 | static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri) | |
58 | { | |
59 | return (char *)env + ri->fieldoffset; | |
60 | } | |
61 | ||
62 | uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri) | |
63 | { | |
64 | /* Raw read of a coprocessor register (as needed for migration, etc). */ | |
65 | if (ri->type & ARM_CP_CONST) { | |
66 | return ri->resetvalue; | |
67 | } else if (ri->raw_readfn) { | |
68 | return ri->raw_readfn(env, ri); | |
69 | } else if (ri->readfn) { | |
70 | return ri->readfn(env, ri); | |
71 | } else { | |
72 | return raw_read(env, ri); | |
73 | } | |
74 | } | |
75 | ||
76 | static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri, | |
77 | uint64_t v) | |
78 | { | |
79 | /* | |
80 | * Raw write of a coprocessor register (as needed for migration, etc). | |
81 | * Note that constant registers are treated as write-ignored; the | |
82 | * caller should check for success by whether a readback gives the | |
83 | * value written. | |
84 | */ | |
85 | if (ri->type & ARM_CP_CONST) { | |
86 | return; | |
87 | } else if (ri->raw_writefn) { | |
88 | ri->raw_writefn(env, ri, v); | |
89 | } else if (ri->writefn) { | |
90 | ri->writefn(env, ri, v); | |
91 | } else { | |
92 | raw_write(env, ri, v); | |
93 | } | |
94 | } | |
95 | ||
96 | static bool raw_accessors_invalid(const ARMCPRegInfo *ri) | |
97 | { | |
98 | /* | |
99 | * Return true if the regdef would cause an assertion if you called | |
100 | * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a | |
101 | * program bug for it not to have the NO_RAW flag). | |
102 | * NB that returning false here doesn't necessarily mean that calling | |
103 | * read/write_raw_cp_reg() is safe, because we can't distinguish "has | |
104 | * read/write access functions which are safe for raw use" from "has | |
105 | * read/write access functions which have side effects but has forgotten | |
106 | * to provide raw access functions". | |
107 | * The tests here line up with the conditions in read/write_raw_cp_reg() | |
108 | * and assertions in raw_read()/raw_write(). | |
109 | */ | |
110 | if ((ri->type & ARM_CP_CONST) || | |
111 | ri->fieldoffset || | |
112 | ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) { | |
113 | return false; | |
114 | } | |
115 | return true; | |
116 | } | |
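| /* | |
| * Illustrative case (editorial note, not from the original source): a | |
| * regdef with only a .writefn but no .readfn, no .raw_readfn and no | |
| * .fieldoffset makes this function return true: read_raw_cp_reg() | |
| * would fall through to raw_read() and trip its assert(), so such a | |
| * register needs the ARM_CP_NO_RAW flag. | |
| */ | |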
117 | ||
118 | bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync) | |
119 | { | |
120 | /* Write the coprocessor state from cpu->env to the (index,value) list. */ | |
121 | int i; | |
122 | bool ok = true; | |
123 | ||
124 | for (i = 0; i < cpu->cpreg_array_len; i++) { | |
125 | uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); | |
126 | const ARMCPRegInfo *ri; | |
127 | uint64_t newval; | |
128 | ||
129 | ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); | |
130 | if (!ri) { | |
131 | ok = false; | |
132 | continue; | |
133 | } | |
134 | if (ri->type & ARM_CP_NO_RAW) { | |
135 | continue; | |
136 | } | |
137 | ||
138 | newval = read_raw_cp_reg(&cpu->env, ri); | |
139 | if (kvm_sync) { | |
140 | /* | |
141 | * Only sync if the previous list->cpustate sync succeeded. | |
142 | * Rather than tracking the success/failure state for every | |
143 | * item in the list, we just recheck "does the raw write we must | |
144 | * have made in write_list_to_cpustate() read back OK" here. | |
145 | */ | |
146 | uint64_t oldval = cpu->cpreg_values[i]; | |
147 | ||
148 | if (oldval == newval) { | |
149 | continue; | |
150 | } | |
151 | ||
152 | write_raw_cp_reg(&cpu->env, ri, oldval); | |
153 | if (read_raw_cp_reg(&cpu->env, ri) != oldval) { | |
154 | continue; | |
155 | } | |
156 | ||
157 | write_raw_cp_reg(&cpu->env, ri, newval); | |
158 | } | |
159 | cpu->cpreg_values[i] = newval; | |
160 | } | |
161 | return ok; | |
162 | } | |
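| /* | |
| * Worked example (editorial): suppose an incoming migration supplied | |
| * oldval = 0x3 for a register that only accepts 0x1, so the write made | |
| * in write_list_to_cpustate() did not stick. On the next | |
| * write_cpustate_to_list(cpu, true) call, newval reads back as 0x1, | |
| * rewriting oldval reads back 0x1 != 0x3, and the stale list entry is | |
| * therefore skipped rather than silently overwritten. | |
| */ | |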
163 | ||
164 | bool write_list_to_cpustate(ARMCPU *cpu) | |
165 | { | |
166 | int i; | |
167 | bool ok = true; | |
168 | ||
169 | for (i = 0; i < cpu->cpreg_array_len; i++) { | |
170 | uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]); | |
171 | uint64_t v = cpu->cpreg_values[i]; | |
172 | const ARMCPRegInfo *ri; | |
173 | ||
174 | ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); | |
175 | if (!ri) { | |
176 | ok = false; | |
177 | continue; | |
178 | } | |
179 | if (ri->type & ARM_CP_NO_RAW) { | |
180 | continue; | |
181 | } | |
182 | /* | |
183 | * Write value and confirm it reads back as written | |
184 | * (to catch read-only registers and partially read-only | |
185 | * registers where the incoming migration value doesn't match) | |
186 | */ | |
187 | write_raw_cp_reg(&cpu->env, ri, v); | |
188 | if (read_raw_cp_reg(&cpu->env, ri) != v) { | |
189 | ok = false; | |
190 | } | |
191 | } | |
192 | return ok; | |
193 | } | |
194 | ||
195 | static void add_cpreg_to_list(gpointer key, gpointer opaque) | |
196 | { | |
197 | ARMCPU *cpu = opaque; | |
198 | uint32_t regidx = (uintptr_t)key; | |
199 | const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); | |
200 | ||
201 | if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) { | |
202 | cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); | |
203 | /* The value array need not be initialized at this point */ | |
204 | cpu->cpreg_array_len++; | |
205 | } | |
206 | } | |
207 | ||
208 | static void count_cpreg(gpointer key, gpointer opaque) | |
209 | { | |
210 | ARMCPU *cpu = opaque; | |
211 | const ARMCPRegInfo *ri; | |
212 | ||
213 | ri = g_hash_table_lookup(cpu->cp_regs, key); | |
214 | ||
215 | if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) { | |
216 | cpu->cpreg_array_len++; | |
217 | } | |
218 | } | |
219 | ||
220 | static gint cpreg_key_compare(gconstpointer a, gconstpointer b) | |
221 | { | |
222 | uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a); | |
223 | uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b); | |
224 | ||
225 | if (aidx > bidx) { | |
226 | return 1; | |
227 | } | |
228 | if (aidx < bidx) { | |
229 | return -1; | |
230 | } | |
231 | return 0; | |
232 | } | |
233 | ||
234 | void init_cpreg_list(ARMCPU *cpu) | |
235 | { | |
236 | /* | |
237 | * Initialise the cpreg_tuples[] array based on the cp_regs hash. | |
238 | * Note that we require cpreg_tuples[] to be sorted by key ID. | |
239 | */ | |
240 | GList *keys; | |
241 | int arraylen; | |
242 | ||
243 | keys = g_hash_table_get_keys(cpu->cp_regs); | |
244 | keys = g_list_sort(keys, cpreg_key_compare); | |
245 | ||
246 | cpu->cpreg_array_len = 0; | |
247 | ||
248 | g_list_foreach(keys, count_cpreg, cpu); | |
249 | ||
250 | arraylen = cpu->cpreg_array_len; | |
251 | cpu->cpreg_indexes = g_new(uint64_t, arraylen); | |
252 | cpu->cpreg_values = g_new(uint64_t, arraylen); | |
253 | cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); | |
254 | cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); | |
255 | cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len; | |
256 | cpu->cpreg_array_len = 0; | |
257 | ||
258 | g_list_foreach(keys, add_cpreg_to_list, cpu); | |
259 | ||
260 | assert(cpu->cpreg_array_len == arraylen); | |
261 | ||
262 | g_list_free(keys); | |
263 | } | |
264 | ||
265 | /* | |
266 | * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0. | |
267 | */ | |
268 | static CPAccessResult access_el3_aa32ns(CPUARMState *env, | |
269 | const ARMCPRegInfo *ri, | |
270 | bool isread) | |
271 | { | |
272 | if (!is_a64(env) && arm_current_el(env) == 3 && | |
273 | arm_is_secure_below_el3(env)) { | |
274 | return CP_ACCESS_TRAP_UNCATEGORIZED; | |
275 | } | |
276 | return CP_ACCESS_OK; | |
277 | } | |
278 | ||
279 | /* | |
280 | * Some secure-only AArch32 registers trap to EL3 if used from | |
281 | * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts). | |
282 | * Note that an access from Secure EL1 can only happen if EL3 is AArch64. | |
283 | * We assume that the .access field is set to PL1_RW. | |
284 | */ | |
285 | static CPAccessResult access_trap_aa32s_el1(CPUARMState *env, | |
286 | const ARMCPRegInfo *ri, | |
287 | bool isread) | |
288 | { | |
289 | if (arm_current_el(env) == 3) { | |
290 | return CP_ACCESS_OK; | |
291 | } | |
292 | if (arm_is_secure_below_el3(env)) { | |
293 | if (env->cp15.scr_el3 & SCR_EEL2) { | |
294 | return CP_ACCESS_TRAP_EL2; | |
295 | } | |
296 | return CP_ACCESS_TRAP_EL3; | |
297 | } | |
298 | /* This will be EL1 NS and EL2 NS, which just UNDEF */ | |
299 | return CP_ACCESS_TRAP_UNCATEGORIZED; | |
300 | } | |
301 | ||
302 | /* | |
303 | * Check for traps to performance monitor registers, which are controlled | |
304 | * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3. | |
305 | */ | |
306 | static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri, | |
307 | bool isread) | |
308 | { | |
309 | int el = arm_current_el(env); | |
310 | uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); | |
311 | ||
312 | if (el < 2 && (mdcr_el2 & MDCR_TPM)) { | |
313 | return CP_ACCESS_TRAP_EL2; | |
314 | } | |
315 | if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { | |
316 | return CP_ACCESS_TRAP_EL3; | |
317 | } | |
318 | return CP_ACCESS_OK; | |
319 | } | |
320 | ||
321 | /* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */ | |
322 | static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri, | |
323 | bool isread) | |
324 | { | |
325 | if (arm_current_el(env) == 1) { | |
326 | uint64_t trap = isread ? HCR_TRVM : HCR_TVM; | |
327 | if (arm_hcr_el2_eff(env) & trap) { | |
328 | return CP_ACCESS_TRAP_EL2; | |
329 | } | |
330 | } | |
331 | return CP_ACCESS_OK; | |
332 | } | |
333 | ||
334 | /* Check for traps from EL1 due to HCR_EL2.TSW. */ | |
335 | static CPAccessResult access_tsw(CPUARMState *env, const ARMCPRegInfo *ri, | |
336 | bool isread) | |
337 | { | |
338 | if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TSW)) { | |
339 | return CP_ACCESS_TRAP_EL2; | |
340 | } | |
341 | return CP_ACCESS_OK; | |
342 | } | |
343 | ||
344 | /* Check for traps from EL1 due to HCR_EL2.TACR. */ | |
345 | static CPAccessResult access_tacr(CPUARMState *env, const ARMCPRegInfo *ri, | |
346 | bool isread) | |
347 | { | |
348 | if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TACR)) { | |
349 | return CP_ACCESS_TRAP_EL2; | |
350 | } | |
351 | return CP_ACCESS_OK; | |
352 | } | |
353 | ||
354 | /* Check for traps from EL1 due to HCR_EL2.TTLB. */ | |
355 | static CPAccessResult access_ttlb(CPUARMState *env, const ARMCPRegInfo *ri, | |
356 | bool isread) | |
357 | { | |
358 | if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TTLB)) { | |
359 | return CP_ACCESS_TRAP_EL2; | |
360 | } | |
361 | return CP_ACCESS_OK; | |
362 | } | |
363 | ||
364 | /* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBIS. */ | |
365 | static CPAccessResult access_ttlbis(CPUARMState *env, const ARMCPRegInfo *ri, | |
366 | bool isread) | |
367 | { | |
368 | if (arm_current_el(env) == 1 && | |
369 | (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBIS))) { | |
370 | return CP_ACCESS_TRAP_EL2; | |
371 | } | |
372 | return CP_ACCESS_OK; | |
373 | } | |
374 | ||
375 | #ifdef TARGET_AARCH64 | |
376 | /* Check for traps from EL1 due to HCR_EL2.TTLB or TTLBOS. */ | |
377 | static CPAccessResult access_ttlbos(CPUARMState *env, const ARMCPRegInfo *ri, | |
378 | bool isread) | |
379 | { | |
380 | if (arm_current_el(env) == 1 && | |
381 | (arm_hcr_el2_eff(env) & (HCR_TTLB | HCR_TTLBOS))) { | |
382 | return CP_ACCESS_TRAP_EL2; | |
383 | } | |
384 | return CP_ACCESS_OK; | |
385 | } | |
386 | #endif | |
387 | ||
388 | static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) | |
389 | { | |
390 | ARMCPU *cpu = env_archcpu(env); | |
391 | ||
392 | raw_write(env, ri, value); | |
393 | tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */ | |
394 | } | |
395 | ||
396 | static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) | |
397 | { | |
398 | ARMCPU *cpu = env_archcpu(env); | |
399 | ||
400 | if (raw_read(env, ri) != value) { | |
401 | /* | |
402 | * Unlike real hardware the qemu TLB uses virtual addresses, | |
403 | * not modified virtual addresses, so this causes a TLB flush. | |
404 | */ | |
405 | tlb_flush(CPU(cpu)); | |
406 | raw_write(env, ri, value); | |
407 | } | |
408 | } | |
409 | ||
410 | static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
411 | uint64_t value) | |
412 | { | |
413 | ARMCPU *cpu = env_archcpu(env); | |
414 | ||
415 | if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA) | |
416 | && !extended_addresses_enabled(env)) { | |
417 | /* | |
418 | * For VMSA (when not using the LPAE long descriptor page table | |
419 | * format) this register includes the ASID, so do a TLB flush. | |
420 | * For PMSA it is purely a process ID and no action is needed. | |
421 | */ | |
422 | tlb_flush(CPU(cpu)); | |
423 | } | |
424 | raw_write(env, ri, value); | |
425 | } | |
426 | ||
427 | static int alle1_tlbmask(CPUARMState *env) | |
428 | { | |
429 | /* | |
430 | * Note that the 'ALL' scope must invalidate both stage 1 and | |
431 | * stage 2 translations, whereas most other scopes only invalidate | |
432 | * stage 1 translations. | |
433 | */ | |
434 | return (ARMMMUIdxBit_E10_1 | | |
435 | ARMMMUIdxBit_E10_1_PAN | | |
436 | ARMMMUIdxBit_E10_0 | | |
437 | ARMMMUIdxBit_Stage2 | | |
438 | ARMMMUIdxBit_Stage2_S); | |
439 | } | |
440 | ||
441 | ||
442 | /* IS variants of TLB operations must affect all cores */ | |
443 | static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
444 | uint64_t value) | |
445 | { | |
446 | CPUState *cs = env_cpu(env); | |
447 | ||
448 | tlb_flush_all_cpus_synced(cs); | |
449 | } | |
450 | ||
451 | static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
452 | uint64_t value) | |
453 | { | |
454 | CPUState *cs = env_cpu(env); | |
455 | ||
456 | tlb_flush_all_cpus_synced(cs); | |
457 | } | |
458 | ||
459 | static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
460 | uint64_t value) | |
461 | { | |
462 | CPUState *cs = env_cpu(env); | |
463 | ||
464 | tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); | |
465 | } | |
466 | ||
467 | static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
468 | uint64_t value) | |
469 | { | |
470 | CPUState *cs = env_cpu(env); | |
471 | ||
472 | tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); | |
473 | } | |
474 | ||
475 | /* | |
476 | * Non-IS variants of TLB operations are upgraded to | |
477 | * IS versions if we are at EL1 and HCR_EL2.FB is effectively set to | |
478 | * force broadcast of these operations. | |
479 | */ | |
480 | static bool tlb_force_broadcast(CPUARMState *env) | |
481 | { | |
482 | return arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_FB); | |
483 | } | |
484 | ||
485 | static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
486 | uint64_t value) | |
487 | { | |
488 | /* Invalidate all (TLBIALL) */ | |
489 | CPUState *cs = env_cpu(env); | |
490 | ||
491 | if (tlb_force_broadcast(env)) { | |
492 | tlb_flush_all_cpus_synced(cs); | |
493 | } else { | |
494 | tlb_flush(cs); | |
495 | } | |
496 | } | |
497 | ||
498 | static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
499 | uint64_t value) | |
500 | { | |
501 | /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ | |
502 | CPUState *cs = env_cpu(env); | |
503 | ||
504 | value &= TARGET_PAGE_MASK; | |
505 | if (tlb_force_broadcast(env)) { | |
506 | tlb_flush_page_all_cpus_synced(cs, value); | |
507 | } else { | |
508 | tlb_flush_page(cs, value); | |
509 | } | |
510 | } | |
511 | ||
512 | static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
513 | uint64_t value) | |
514 | { | |
515 | /* Invalidate by ASID (TLBIASID) */ | |
516 | CPUState *cs = env_cpu(env); | |
517 | ||
518 | if (tlb_force_broadcast(env)) { | |
519 | tlb_flush_all_cpus_synced(cs); | |
520 | } else { | |
521 | tlb_flush(cs); | |
522 | } | |
523 | } | |
524 | ||
525 | static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
526 | uint64_t value) | |
527 | { | |
528 | /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ | |
529 | CPUState *cs = env_cpu(env); | |
530 | ||
531 | value &= TARGET_PAGE_MASK; | |
532 | if (tlb_force_broadcast(env)) { | |
533 | tlb_flush_page_all_cpus_synced(cs, value); | |
534 | } else { | |
535 | tlb_flush_page(cs, value); | |
536 | } | |
537 | } | |
538 | ||
539 | static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
540 | uint64_t value) | |
541 | { | |
542 | CPUState *cs = env_cpu(env); | |
543 | ||
544 | tlb_flush_by_mmuidx(cs, alle1_tlbmask(env)); | |
545 | } | |
546 | ||
547 | static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
548 | uint64_t value) | |
549 | { | |
550 | CPUState *cs = env_cpu(env); | |
551 | ||
552 | tlb_flush_by_mmuidx_all_cpus_synced(cs, alle1_tlbmask(env)); | |
553 | } | |
554 | ||
555 | ||
556 | static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
557 | uint64_t value) | |
558 | { | |
559 | CPUState *cs = env_cpu(env); | |
560 | ||
561 | tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2); | |
562 | } | |
563 | ||
564 | static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
565 | uint64_t value) | |
566 | { | |
567 | CPUState *cs = env_cpu(env); | |
568 | ||
569 | tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2); | |
570 | } | |
571 | ||
572 | static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
573 | uint64_t value) | |
574 | { | |
575 | CPUState *cs = env_cpu(env); | |
576 | uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); | |
577 | ||
578 | tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2); | |
579 | } | |
580 | ||
581 | static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
582 | uint64_t value) | |
583 | { | |
584 | CPUState *cs = env_cpu(env); | |
585 | uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); | |
586 | ||
587 | tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, | |
588 | ARMMMUIdxBit_E2); | |
589 | } | |
590 | ||
591 | static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
592 | uint64_t value) | |
593 | { | |
594 | CPUState *cs = env_cpu(env); | |
595 | uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12; | |
596 | ||
597 | tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_Stage2); | |
598 | } | |
599 | ||
600 | static void tlbiipas2is_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
601 | uint64_t value) | |
602 | { | |
603 | CPUState *cs = env_cpu(env); | |
604 | uint64_t pageaddr = (value & MAKE_64BIT_MASK(0, 28)) << 12; | |
605 | ||
606 | tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ARMMMUIdxBit_Stage2); | |
607 | } | |
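| /* | |
| * Editorial note: for the TLBIIPAS2* operations above, the written | |
| * value carries the intermediate physical address as IPA >> 12 in its | |
| * low 28 bits, which is why the handlers rebuild the page address with | |
| * (value & MAKE_64BIT_MASK(0, 28)) << 12 before flushing. | |
| */ | |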
608 | ||
609 | static const ARMCPRegInfo cp_reginfo[] = { | |
610 | /* | |
611 | * Define the secure and non-secure FCSE identifier CP registers | |
612 | * separately because there is no secure bank in V8 (no _EL3). This allows | |
613 | * the secure register to be properly reset and migrated. There is also no | |
614 | * v8 EL1 version of the register so the non-secure instance stands alone. | |
615 | */ | |
616 | { .name = "FCSEIDR", | |
617 | .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, | |
618 | .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS, | |
619 | .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns), | |
620 | .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, | |
621 | { .name = "FCSEIDR_S", | |
622 | .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0, | |
623 | .access = PL1_RW, .secure = ARM_CP_SECSTATE_S, | |
624 | .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s), | |
625 | .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, }, | |
626 | /* | |
627 | * Define the secure and non-secure context identifier CP registers | |
628 | * separately because there is no secure bank in V8 (no _EL3). This allows | |
629 | * the secure register to be properly reset and migrated. In the | |
630 | * non-secure case, the 32-bit register will have reset and migration | |
631 | * disabled during registration as it is handled by the 64-bit instance. | |
632 | */ | |
633 | { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH, | |
634 | .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, | |
635 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
636 | .fgt = FGT_CONTEXTIDR_EL1, | |
637 | .secure = ARM_CP_SECSTATE_NS, | |
638 | .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]), | |
639 | .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, | |
640 | { .name = "CONTEXTIDR_S", .state = ARM_CP_STATE_AA32, | |
641 | .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, | |
642 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
643 | .secure = ARM_CP_SECSTATE_S, | |
644 | .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s), | |
645 | .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, | |
646 | }; | |
647 | ||
648 | static const ARMCPRegInfo not_v8_cp_reginfo[] = { | |
649 | /* | |
650 | * NB: Some of these registers exist in v8 but with more precise | |
651 | * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]). | |
652 | */ | |
653 | /* MMU Domain access control / MPU write buffer control */ | |
654 | { .name = "DACR", | |
655 | .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY, | |
656 | .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, | |
657 | .writefn = dacr_write, .raw_writefn = raw_write, | |
658 | .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), | |
659 | offsetoflow32(CPUARMState, cp15.dacr_ns) } }, | |
660 | /* | |
661 | * ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs. | |
662 | * For v6 and v5, these mappings are overly broad. | |
663 | */ | |
664 | { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0, | |
665 | .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, | |
666 | { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1, | |
667 | .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, | |
668 | { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4, | |
669 | .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, | |
670 | { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8, | |
671 | .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP }, | |
672 | /* Cache maintenance ops; some of this space may be overridden later. */ | |
673 | { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, | |
674 | .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, | |
675 | .type = ARM_CP_NOP | ARM_CP_OVERRIDE }, | |
676 | }; | |
677 | ||
678 | static const ARMCPRegInfo not_v6_cp_reginfo[] = { | |
679 | /* | |
680 | * Not all pre-v6 cores implemented this WFI, so this is slightly | |
681 | * over-broad. | |
682 | */ | |
683 | { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2, | |
684 | .access = PL1_W, .type = ARM_CP_WFI }, | |
685 | }; | |
686 | ||
687 | static const ARMCPRegInfo not_v7_cp_reginfo[] = { | |
688 | /* | |
689 | * Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which | |
690 | * is UNPREDICTABLE; we choose to NOP as most implementations do). | |
691 | */ | |
692 | { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, | |
693 | .access = PL1_W, .type = ARM_CP_WFI }, | |
694 | /* | |
695 | * L1 cache lockdown. Not architectural in v6 and earlier but in practice | |
696 | * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and | |
697 | * OMAPCP will override this space. | |
698 | */ | |
699 | { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0, | |
700 | .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data), | |
701 | .resetvalue = 0 }, | |
702 | { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1, | |
703 | .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn), | |
704 | .resetvalue = 0 }, | |
705 | /* v6 doesn't have the cache ID registers but Linux reads them anyway */ | |
706 | { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY, | |
707 | .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, | |
708 | .resetvalue = 0 }, | |
709 | /* | |
710 | * We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR; | |
711 | * implementing it as RAZ means the "debug architecture version" bits | |
712 | * will read as a reserved value, which should cause Linux to not try | |
713 | * to use the debug hardware. | |
714 | */ | |
715 | { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, | |
716 | .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 }, | |
717 | /* | |
718 | * MMU TLB control. Note that the wildcarding means we cover not just | |
719 | * the unified TLB ops but also the dside/iside/inner-shareable variants. | |
720 | */ | |
721 | { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY, | |
722 | .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write, | |
723 | .type = ARM_CP_NO_RAW }, | |
724 | { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY, | |
725 | .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write, | |
726 | .type = ARM_CP_NO_RAW }, | |
727 | { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY, | |
728 | .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write, | |
729 | .type = ARM_CP_NO_RAW }, | |
730 | { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY, | |
731 | .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write, | |
732 | .type = ARM_CP_NO_RAW }, | |
733 | { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2, | |
734 | .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP }, | |
735 | { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2, | |
736 | .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP }, | |
737 | }; | |
738 | ||
739 | static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
740 | uint64_t value) | |
741 | { | |
742 | uint32_t mask = 0; | |
743 | ||
744 | /* In ARMv8 most bits of CPACR_EL1 are RES0. */ | |
745 | if (!arm_feature(env, ARM_FEATURE_V8)) { | |
746 | /* | |
747 | * ARMv7 defines bits for unimplemented coprocessors as RAZ/WI. | |
748 | * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP. | |
749 | * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell. | |
750 | */ | |
751 | if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) { | |
752 | /* VFP coprocessor: cp10 & cp11 [23:20] */ | |
753 | mask |= R_CPACR_ASEDIS_MASK | | |
754 | R_CPACR_D32DIS_MASK | | |
755 | R_CPACR_CP11_MASK | | |
756 | R_CPACR_CP10_MASK; | |
757 | ||
758 | if (!arm_feature(env, ARM_FEATURE_NEON)) { | |
759 | /* ASEDIS [31] bit is RAO/WI */ | |
760 | value |= R_CPACR_ASEDIS_MASK; | |
761 | } | |
762 | ||
763 | /* | |
764 | * VFPv3 and upwards with NEON implement 32 double precision | |
765 | * registers (D0-D31). | |
766 | */ | |
767 | if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) { | |
768 | /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ | |
769 | value |= R_CPACR_D32DIS_MASK; | |
770 | } | |
771 | } | |
772 | value &= mask; | |
773 | } | |
774 | ||
775 | /* | |
776 | * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 | |
777 | * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. | |
778 | */ | |
779 | if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && | |
780 | !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { | |
781 | mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK; | |
782 | value = (value & ~mask) | (env->cp15.cpacr_el1 & mask); | |
783 | } | |
784 | ||
785 | env->cp15.cpacr_el1 = value; | |
786 | } | |
787 | ||
788 | static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
789 | { | |
790 | /* | |
791 | * For A-profile AArch32 EL3 (but not M-profile secure mode), if NSACR.CP10 | |
792 | * is 0 then CPACR.{CP11,CP10} ignore writes and read as 0b00. | |
793 | */ | |
794 | uint64_t value = env->cp15.cpacr_el1; | |
795 | ||
796 | if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && | |
797 | !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { | |
798 | value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK); | |
799 | } | |
800 | return value; | |
801 | } | |
802 | ||
803 | ||
804 | static void cpacr_reset(CPUARMState *env, const ARMCPRegInfo *ri) | |
805 | { | |
806 | /* | |
807 | * Call cpacr_write() so that we reset with the correct RAO bits set | |
808 | * for our CPU features. | |
809 | */ | |
810 | cpacr_write(env, ri, 0); | |
811 | } | |
812 | ||
813 | static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
814 | bool isread) | |
815 | { | |
816 | if (arm_feature(env, ARM_FEATURE_V8)) { | |
817 | /* Check if CPACR accesses are to be trapped to EL2 */ | |
818 | if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) && | |
819 | FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) { | |
820 | return CP_ACCESS_TRAP_EL2; | |
821 | /* Check if CPACR accesses are to be trapped to EL3 */ | |
822 | } else if (arm_current_el(env) < 3 && | |
823 | FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) { | |
824 | return CP_ACCESS_TRAP_EL3; | |
825 | } | |
826 | } | |
827 | ||
828 | return CP_ACCESS_OK; | |
829 | } | |
830 | ||
831 | static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
832 | bool isread) | |
833 | { | |
834 | /* Check if CPTR accesses are set to trap to EL3 */ | |
835 | if (arm_current_el(env) == 2 && | |
836 | FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) { | |
837 | return CP_ACCESS_TRAP_EL3; | |
838 | } | |
839 | ||
840 | return CP_ACCESS_OK; | |
841 | } | |
842 | ||
843 | static const ARMCPRegInfo v6_cp_reginfo[] = { | |
844 | /* prefetch by MVA in v6, NOP in v7 */ | |
845 | { .name = "MVA_prefetch", | |
846 | .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1, | |
847 | .access = PL1_W, .type = ARM_CP_NOP }, | |
848 | /* | |
849 | * We need to break the TB after ISB to execute self-modifying code | |
850 | * correctly and also to take any pending interrupts immediately. | |
851 | * So use arm_cp_write_ignore() function instead of ARM_CP_NOP flag. | |
852 | */ | |
853 | { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4, | |
854 | .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore }, | |
855 | { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4, | |
856 | .access = PL0_W, .type = ARM_CP_NOP }, | |
857 | { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5, | |
858 | .access = PL0_W, .type = ARM_CP_NOP }, | |
859 | { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2, | |
860 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
861 | .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s), | |
862 | offsetof(CPUARMState, cp15.ifar_ns) }, | |
863 | .resetvalue = 0, }, | |
864 | /* | |
865 | * Watchpoint Fault Address Register: should actually only be present | |
866 | * for 1136, 1176, 11MPCore. | |
867 | */ | |
868 | { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, | |
869 | .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, }, | |
870 | { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, | |
871 | .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, | |
872 | .fgt = FGT_CPACR_EL1, | |
873 | .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), | |
874 | .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read }, | |
875 | }; | |
876 | ||
877 | typedef struct pm_event { | |
878 | uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */ | |
879 | /* If the event is supported on this CPU (used to generate PMCEID[01]) */ | |
880 | bool (*supported)(CPUARMState *); | |
881 | /* | |
882 | * Retrieve the current count of the underlying event. The programmed | |
883 | * counters hold a difference from the return value from this function | |
884 | */ | |
885 | uint64_t (*get_count)(CPUARMState *); | |
886 | /* | |
887 | * Return how many nanoseconds it will take (at a minimum) for count events | |
888 | * to occur. A negative value indicates the counter will never overflow, or | |
889 | * that the counter has otherwise arranged for the overflow bit to be set | |
890 | * and the PMU interrupt to be raised on overflow. | |
891 | */ | |
892 | int64_t (*ns_per_count)(uint64_t); | |
893 | } pm_event; | |
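| /* | |
| * Editorial example of the delta scheme described above: if the | |
| * underlying count returned by get_count() is 1000 when the guest | |
| * programs a counter to 42, the stored delta becomes 1000 - 42 = 958; | |
| * when the underlying count later reaches 1010, the guest-visible | |
| * counter value is 1010 - 958 = 52. | |
| */ | |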
894 | ||
895 | static bool event_always_supported(CPUARMState *env) | |
896 | { | |
897 | return true; | |
898 | } | |
899 | ||
900 | static uint64_t swinc_get_count(CPUARMState *env) | |
901 | { | |
902 | /* | |
903 | * SW_INCR events are written directly to the pmevcntr's by writes to | |
904 | * PMSWINC, so there is no underlying count maintained by the PMU itself | |
905 | */ | |
906 | return 0; | |
907 | } | |
908 | ||
909 | static int64_t swinc_ns_per(uint64_t ignored) | |
910 | { | |
911 | return -1; | |
912 | } | |
913 | ||
914 | /* | |
915 | * Return the underlying cycle count for the PMU cycle counters. If we're in | |
916 | * usermode, simply return 0. | |
917 | */ | |
918 | static uint64_t cycles_get_count(CPUARMState *env) | |
919 | { | |
920 | #ifndef CONFIG_USER_ONLY | |
921 | return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), | |
922 | ARM_CPU_FREQ, NANOSECONDS_PER_SECOND); | |
923 | #else | |
924 | return cpu_get_host_ticks(); | |
925 | #endif | |
926 | } | |
927 | ||
928 | #ifndef CONFIG_USER_ONLY | |
929 | static int64_t cycles_ns_per(uint64_t cycles) | |
930 | { | |
931 | return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles; | |
932 | } | |
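| /* | |
| * Editor's note: with ARM_CPU_FREQ fixed at 1 GHz, both conversions | |
| * above are effectively 1:1 (one emulated cycle per virtual | |
| * nanosecond), so cycles_ns_per() reduces to returning 'cycles'. | |
| */ | |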
933 | ||
934 | static bool instructions_supported(CPUARMState *env) | |
935 | { | |
936 | return icount_enabled() == 1; /* Precise instruction counting */ | |
937 | } | |
938 | ||
939 | static uint64_t instructions_get_count(CPUARMState *env) | |
940 | { | |
941 | return (uint64_t)icount_get_raw(); | |
942 | } | |
943 | ||
944 | static int64_t instructions_ns_per(uint64_t icount) | |
945 | { | |
946 | return icount_to_ns((int64_t)icount); | |
947 | } | |
948 | #endif | |
949 | ||
950 | static bool pmuv3p1_events_supported(CPUARMState *env) | |
951 | { | |
952 | /* For events which are supported in any v8.1 PMU */ | |
953 | return cpu_isar_feature(any_pmuv3p1, env_archcpu(env)); | |
954 | } | |
955 | ||
956 | static bool pmuv3p4_events_supported(CPUARMState *env) | |
957 | { | |
958 | /* For events which are supported in any v8.4 PMU */ | |
959 | return cpu_isar_feature(any_pmuv3p4, env_archcpu(env)); | |
960 | } | |
961 | ||
962 | static uint64_t zero_event_get_count(CPUARMState *env) | |
963 | { | |
964 | /* For events which on QEMU never fire, so their count is always zero */ | |
965 | return 0; | |
966 | } | |
967 | ||
968 | static int64_t zero_event_ns_per(uint64_t cycles) | |
969 | { | |
970 | /* An event which never fires can never overflow */ | |
971 | return -1; | |
972 | } | |
973 | ||
974 | static const pm_event pm_events[] = { | |
975 | { .number = 0x000, /* SW_INCR */ | |
976 | .supported = event_always_supported, | |
977 | .get_count = swinc_get_count, | |
978 | .ns_per_count = swinc_ns_per, | |
979 | }, | |
980 | #ifndef CONFIG_USER_ONLY | |
981 | { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */ | |
982 | .supported = instructions_supported, | |
983 | .get_count = instructions_get_count, | |
984 | .ns_per_count = instructions_ns_per, | |
985 | }, | |
986 | { .number = 0x011, /* CPU_CYCLES, Cycle */ | |
987 | .supported = event_always_supported, | |
988 | .get_count = cycles_get_count, | |
989 | .ns_per_count = cycles_ns_per, | |
990 | }, | |
991 | #endif | |
992 | { .number = 0x023, /* STALL_FRONTEND */ | |
993 | .supported = pmuv3p1_events_supported, | |
994 | .get_count = zero_event_get_count, | |
995 | .ns_per_count = zero_event_ns_per, | |
996 | }, | |
997 | { .number = 0x024, /* STALL_BACKEND */ | |
998 | .supported = pmuv3p1_events_supported, | |
999 | .get_count = zero_event_get_count, | |
1000 | .ns_per_count = zero_event_ns_per, | |
1001 | }, | |
1002 | { .number = 0x03c, /* STALL */ | |
1003 | .supported = pmuv3p4_events_supported, | |
1004 | .get_count = zero_event_get_count, | |
1005 | .ns_per_count = zero_event_ns_per, | |
1006 | }, | |
1007 | }; | |
1008 | ||
1009 | /* | |
1010 | * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of | |
1011 | * events (i.e. the statistical profiling extension), this implementation | |
1012 | * should first be updated to something sparse instead of the current | |
1013 | * supported_event_map[] array. | |
1014 | */ | |
1015 | #define MAX_EVENT_ID 0x3c | |
1016 | #define UNSUPPORTED_EVENT UINT16_MAX | |
1017 | static uint16_t supported_event_map[MAX_EVENT_ID + 1]; | |
1018 | ||
1019 | /* | |
1020 | * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map | |
1021 | * of ARM event numbers to indices in our pm_events array. | |
1022 | * | |
1023 | * Note: Events in the 0x40XX range are not currently supported. | |
1024 | */ | |
1025 | void pmu_init(ARMCPU *cpu) | |
1026 | { | |
1027 | unsigned int i; | |
1028 | ||
1029 | /* | |
1030 | * Empty supported_event_map and cpu->pmceid[01] before adding supported | |
1031 | * events to them | |
1032 | */ | |
1033 | for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) { | |
1034 | supported_event_map[i] = UNSUPPORTED_EVENT; | |
1035 | } | |
1036 | cpu->pmceid0 = 0; | |
1037 | cpu->pmceid1 = 0; | |
1038 | ||
1039 | for (i = 0; i < ARRAY_SIZE(pm_events); i++) { | |
1040 | const pm_event *cnt = &pm_events[i]; | |
1041 | assert(cnt->number <= MAX_EVENT_ID); | |
1042 | /* We do not currently support events in the 0x40xx range */ | |
1043 | assert(cnt->number <= 0x3f); | |
1044 | ||
1045 | if (cnt->supported(&cpu->env)) { | |
1046 | supported_event_map[cnt->number] = i; | |
1047 | uint64_t event_mask = 1ULL << (cnt->number & 0x1f); | |
1048 | if (cnt->number & 0x20) { | |
1049 | cpu->pmceid1 |= event_mask; | |
1050 | } else { | |
1051 | cpu->pmceid0 |= event_mask; | |
1052 | } | |
1053 | } | |
1054 | } | |
1055 | } | |
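| /* | |
| * Worked example (editorial): the STALL event (0x03c) has bit 5 set, | |
| * so pmu_init() places it in PMCEID1 at bit position 0x3c & 0x1f = | |
| * 0x1c, while SW_INCR (0x000) lands in PMCEID0 at bit 0. | |
| */ | |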
1056 | ||
1057 | /* | |
1058 | * Check at runtime whether a PMU event is supported for the current machine | |
1059 | */ | |
1060 | static bool event_supported(uint16_t number) | |
1061 | { | |
1062 | if (number > MAX_EVENT_ID) { | |
1063 | return false; | |
1064 | } | |
1065 | return supported_event_map[number] != UNSUPPORTED_EVENT; | |
1066 | } | |
1067 | ||
1068 | static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
1069 | bool isread) | |
1070 | { | |
1071 | /* | |
1072 | * Performance monitor registers user accessibility is controlled | |
1073 | * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable | |
1074 | * trapping to EL2 or EL3 for other accesses. | |
1075 | */ | |
1076 | int el = arm_current_el(env); | |
1077 | uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); | |
1078 | ||
1079 | if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { | |
1080 | return CP_ACCESS_TRAP; | |
1081 | } | |
1082 | if (el < 2 && (mdcr_el2 & MDCR_TPM)) { | |
1083 | return CP_ACCESS_TRAP_EL2; | |
1084 | } | |
1085 | if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { | |
1086 | return CP_ACCESS_TRAP_EL3; | |
1087 | } | |
1088 | ||
1089 | return CP_ACCESS_OK; | |
1090 | } | |
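| /* | |
| * Editorial summary of the PMUSERENR_EL0 bits checked here and in the | |
| * pmreg_access_* variants below: bit 0 EN (general EL0 access), | |
| * bit 1 SW (PMSWINC writes), bit 2 CR (cycle counter reads) and | |
| * bit 3 ER (event counter reads). | |
| */ | |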
1091 | ||
1092 | static CPAccessResult pmreg_access_xevcntr(CPUARMState *env, | |
1093 | const ARMCPRegInfo *ri, | |
1094 | bool isread) | |
1095 | { | |
1096 | /* ER: event counter read trap control */ | |
1097 | if (arm_feature(env, ARM_FEATURE_V8) | |
1098 | && arm_current_el(env) == 0 | |
1099 | && (env->cp15.c9_pmuserenr & (1 << 3)) != 0 | |
1100 | && isread) { | |
1101 | return CP_ACCESS_OK; | |
1102 | } | |
1103 | ||
1104 | return pmreg_access(env, ri, isread); | |
1105 | } | |
1106 | ||
1107 | static CPAccessResult pmreg_access_swinc(CPUARMState *env, | |
1108 | const ARMCPRegInfo *ri, | |
1109 | bool isread) | |
1110 | { | |
1111 | /* SW: software increment write trap control */ | |
1112 | if (arm_feature(env, ARM_FEATURE_V8) | |
1113 | && arm_current_el(env) == 0 | |
1114 | && (env->cp15.c9_pmuserenr & (1 << 1)) != 0 | |
1115 | && !isread) { | |
1116 | return CP_ACCESS_OK; | |
1117 | } | |
1118 | ||
1119 | return pmreg_access(env, ri, isread); | |
1120 | } | |
1121 | ||
1122 | static CPAccessResult pmreg_access_selr(CPUARMState *env, | |
1123 | const ARMCPRegInfo *ri, | |
1124 | bool isread) | |
1125 | { | |
1126 | /* ER: event counter read trap control */ | |
1127 | if (arm_feature(env, ARM_FEATURE_V8) | |
1128 | && arm_current_el(env) == 0 | |
1129 | && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) { | |
1130 | return CP_ACCESS_OK; | |
1131 | } | |
1132 | ||
1133 | return pmreg_access(env, ri, isread); | |
1134 | } | |
1135 | ||
1136 | static CPAccessResult pmreg_access_ccntr(CPUARMState *env, | |
1137 | const ARMCPRegInfo *ri, | |
1138 | bool isread) | |
1139 | { | |
1140 | /* CR: cycle counter read trap control */ | |
1141 | if (arm_feature(env, ARM_FEATURE_V8) | |
1142 | && arm_current_el(env) == 0 | |
1143 | && (env->cp15.c9_pmuserenr & (1 << 2)) != 0 | |
1144 | && isread) { | |
1145 | return CP_ACCESS_OK; | |
1146 | } | |
1147 | ||
1148 | return pmreg_access(env, ri, isread); | |
1149 | } | |
1150 | ||
1151 | /* | |
1152 | * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at. | |
1153 | * We use these to decide whether we need to wrap a write to MDCR_EL2 | |
1154 | * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls. | |
1155 | */ | |
1156 | #define MDCR_EL2_PMU_ENABLE_BITS \ | |
1157 | (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP) | |
1158 | #define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD) | |
1159 | ||
1160 | /* | |
1161 | * Returns true if the counter (pass 31 for PMCCNTR) should count events using | |
1162 | * the current EL, security state, and register configuration. | |
1163 | */ | |
1164 | static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter) | |
1165 | { | |
1166 | uint64_t filter; | |
1167 | bool e, p, u, nsk, nsu, nsh, m; | |
1168 | bool enabled, prohibited = false, filtered; | |
1169 | bool secure = arm_is_secure(env); | |
1170 | int el = arm_current_el(env); | |
1171 | uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); | |
1172 | uint8_t hpmn = mdcr_el2 & MDCR_HPMN; | |
1173 | ||
1174 | if (!arm_feature(env, ARM_FEATURE_PMU)) { | |
1175 | return false; | |
1176 | } | |
1177 | ||
1178 | if (!arm_feature(env, ARM_FEATURE_EL2) || | |
1179 | (counter < hpmn || counter == 31)) { | |
1180 | e = env->cp15.c9_pmcr & PMCRE; | |
1181 | } else { | |
1182 | e = mdcr_el2 & MDCR_HPME; | |
1183 | } | |
1184 | enabled = e && (env->cp15.c9_pmcnten & (1 << counter)); | |
1185 | ||
1186 | /* Is event counting prohibited? */ | |
1187 | if (el == 2 && (counter < hpmn || counter == 31)) { | |
1188 | prohibited = mdcr_el2 & MDCR_HPMD; | |
1189 | } | |
1190 | if (secure) { | |
1191 | prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME); | |
1192 | } | |
1193 | ||
1194 | if (counter == 31) { | |
1195 | /* | |
1196 | * The cycle counter defaults to running. PMCR.DP says "disable | |
1197 | * the cycle counter when event counting is prohibited". | |
1198 | * Some MDCR bits disable the cycle counter specifically. | |
1199 | */ | |
1200 | prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP; | |
1201 | if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { | |
1202 | if (secure) { | |
1203 | prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD); | |
1204 | } | |
1205 | if (el == 2) { | |
1206 | prohibited = prohibited || (mdcr_el2 & MDCR_HCCD); | |
1207 | } | |
1208 | } | |
1209 | } | |
1210 | ||
1211 | if (counter == 31) { | |
1212 | filter = env->cp15.pmccfiltr_el0; | |
1213 | } else { | |
1214 | filter = env->cp15.c14_pmevtyper[counter]; | |
1215 | } | |
1216 | ||
1217 | p = filter & PMXEVTYPER_P; | |
1218 | u = filter & PMXEVTYPER_U; | |
1219 | nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK); | |
1220 | nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU); | |
1221 | nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH); | |
1222 | m = arm_el_is_aa64(env, 1) && | |
1223 | arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M); | |
1224 | ||
1225 | if (el == 0) { | |
1226 | filtered = secure ? u : u != nsu; | |
1227 | } else if (el == 1) { | |
1228 | filtered = secure ? p : p != nsk; | |
1229 | } else if (el == 2) { | |
1230 | filtered = !nsh; | |
1231 | } else { /* EL3 */ | |
1232 | filtered = m != p; | |
1233 | } | |
1234 | ||
1235 | if (counter != 31) { | |
1236 | /* | |
1237 | * If not checking PMCCNTR, ensure the counter is setup to an event we | |
1238 | * support | |
1239 | */ | |
1240 | uint16_t event = filter & PMXEVTYPER_EVTCOUNT; | |
1241 | if (!event_supported(event)) { | |
1242 | return false; | |
1243 | } | |
1244 | } | |
1245 | ||
1246 | return enabled && !prohibited && !filtered; | |
1247 | } | |
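| /* | |
| * Worked example (editorial): with PMEVTYPER.U = 0 and NSU = 1, the | |
| * filter logic above gives filtered = (u != nsu) = true at Non-secure | |
| * EL0, but filtered is just u (false) at Secure EL0, so the event is | |
| * counted only in the Secure EL0 case. | |
| */ | |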
1248 | ||
1249 | static void pmu_update_irq(CPUARMState *env) | |
1250 | { | |
1251 | ARMCPU *cpu = env_archcpu(env); | |
1252 | qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) && | |
1253 | (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); | |
1254 | } | |
1255 | ||
1256 | static bool pmccntr_clockdiv_enabled(CPUARMState *env) | |
1257 | { | |
1258 | /* | |
1259 | * Return true if the clock divider is enabled and the cycle counter | |
1260 | * is supposed to tick only once every 64 clock cycles. This is | |
1261 | * controlled by PMCR.D, but if PMCR.LC is set to enable the long | |
1262 | * (64-bit) cycle counter PMCR.D has no effect. | |
1263 | */ | |
1264 | return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD; | |
1265 | } | |
1266 | ||
1267 | static bool pmevcntr_is_64_bit(CPUARMState *env, int counter) | |
1268 | { | |
1269 | /* Return true if the specified event counter is configured to be 64 bit */ | |
1270 | ||
1271 | /* This isn't intended to be used with the cycle counter */ | |
1272 | assert(counter < 31); | |
1273 | ||
1274 | if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { | |
1275 | return false; | |
1276 | } | |
1277 | ||
1278 | if (arm_feature(env, ARM_FEATURE_EL2)) { | |
1279 | /* | |
1280 | * MDCR_EL2.HLP still applies even when EL2 is disabled in the | |
1281 | * current security state, so we don't use arm_mdcr_el2_eff() here. | |
1282 | */ | |
1283 | bool hlp = env->cp15.mdcr_el2 & MDCR_HLP; | |
1284 | int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN; | |
1285 | ||
1286 | if (hpmn != 0 && counter >= hpmn) { | |
1287 | return hlp; | |
1288 | } | |
1289 | } | |
1290 | return env->cp15.c9_pmcr & PMCRLP; | |
1291 | } | |
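| /* | |
| * Editorial note: with EL2 present and HPMN non-zero, counters with | |
| * index >= HPMN take their 64-bit overflow choice from MDCR_EL2.HLP, | |
| * while the remaining counters (and the HPMN == 0 case) fall back to | |
| * PMCR.LP, mirroring the HPMN split used in pmu_counter_enabled(). | |
| */ | |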
1292 | ||
1293 | /* | |
1294 | * Ensure c15_ccnt is the guest-visible count so that operations such as | |
1295 | * enabling/disabling the counter or filtering, modifying the count itself, | |
1296 | * etc. can be done logically. This is essentially a no-op if the counter is | |
1297 | * not enabled at the time of the call. | |
1298 | */ | |
1299 | static void pmccntr_op_start(CPUARMState *env) | |
1300 | { | |
1301 | uint64_t cycles = cycles_get_count(env); | |
1302 | ||
1303 | if (pmu_counter_enabled(env, 31)) { | |
1304 | uint64_t eff_cycles = cycles; | |
1305 | if (pmccntr_clockdiv_enabled(env)) { | |
1306 | eff_cycles /= 64; | |
1307 | } | |
1308 | ||
1309 | uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta; | |
1310 | ||
1311 | uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \ | |
1312 | 1ull << 63 : 1ull << 31; | |
1313 | if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) { | |
1314 | env->cp15.c9_pmovsr |= (1ULL << 31); | |
1315 | pmu_update_irq(env); | |
1316 | } | |
1317 | ||
1318 | env->cp15.c15_ccnt = new_pmccntr; | |
1319 | } | |
1320 | env->cp15.c15_ccnt_delta = cycles; | |
1321 | } | |
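| /* | |
| * Editor's note: "c15_ccnt & ~new_pmccntr & overflow_mask" above is | |
| * true exactly when the overflow bit (31, or 63 with PMCR.LC) has gone | |
| * from 1 to 0 since the last sync, i.e. the counter wrapped, which is | |
| * why the overflow flag is set and the PMU interrupt re-evaluated. | |
| */ | |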
1322 | ||
1323 | /* | |
1324 | * If PMCCNTR is enabled, recalculate the delta between the clock and the | |
1325 | * guest-visible count. A call to pmccntr_op_finish should follow every call to | |
1326 | * pmccntr_op_start. | |
1327 | */ | |
1328 | static void pmccntr_op_finish(CPUARMState *env) | |
1329 | { | |
1330 | if (pmu_counter_enabled(env, 31)) { | |
1331 | #ifndef CONFIG_USER_ONLY | |
1332 | /* Calculate when the counter will next overflow */ | |
1333 | uint64_t remaining_cycles = -env->cp15.c15_ccnt; | |
1334 | if (!(env->cp15.c9_pmcr & PMCRLC)) { | |
1335 | remaining_cycles = (uint32_t)remaining_cycles; | |
1336 | } | |
1337 | int64_t overflow_in = cycles_ns_per(remaining_cycles); | |
1338 | ||
1339 | if (overflow_in > 0) { | |
1340 | int64_t overflow_at; | |
1341 | ||
1342 | if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), | |
1343 | overflow_in, &overflow_at)) { | |
1344 | ARMCPU *cpu = env_archcpu(env); | |
1345 | timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); | |
1346 | } | |
1347 | } | |
1348 | #endif | |
1349 | ||
1350 | uint64_t prev_cycles = env->cp15.c15_ccnt_delta; | |
1351 | if (pmccntr_clockdiv_enabled(env)) { | |
1352 | prev_cycles /= 64; | |
1353 | } | |
1354 | env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt; | |
1355 | } | |
1356 | } | |
1357 | ||
1358 | static void pmevcntr_op_start(CPUARMState *env, uint8_t counter) | |
1359 | { | |
1360 | ||
1361 | uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; | |
1362 | uint64_t count = 0; | |
1363 | if (event_supported(event)) { | |
1364 | uint16_t event_idx = supported_event_map[event]; | |
1365 | count = pm_events[event_idx].get_count(env); | |
1366 | } | |
1367 | ||
1368 | if (pmu_counter_enabled(env, counter)) { | |
1369 | uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter]; | |
1370 | uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ? | |
1371 | 1ULL << 63 : 1ULL << 31; | |
1372 | ||
1373 | if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) { | |
1374 | env->cp15.c9_pmovsr |= (1 << counter); | |
1375 | pmu_update_irq(env); | |
1376 | } | |
1377 | env->cp15.c14_pmevcntr[counter] = new_pmevcntr; | |
1378 | } | |
1379 | env->cp15.c14_pmevcntr_delta[counter] = count; | |
1380 | } | |
1381 | ||
1382 | static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter) | |
1383 | { | |
1384 | if (pmu_counter_enabled(env, counter)) { | |
1385 | #ifndef CONFIG_USER_ONLY | |
1386 | uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT; | |
1387 | uint16_t event_idx = supported_event_map[event]; | |
1388 | uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1); | |
1389 | int64_t overflow_in; | |
1390 | ||
1391 | if (!pmevcntr_is_64_bit(env, counter)) { | |
1392 | delta = (uint32_t)delta; | |
1393 | } | |
1394 | overflow_in = pm_events[event_idx].ns_per_count(delta); | |
1395 | ||
1396 | if (overflow_in > 0) { | |
1397 | int64_t overflow_at; | |
1398 | ||
1399 | if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), | |
1400 | overflow_in, &overflow_at)) { | |
1401 | ARMCPU *cpu = env_archcpu(env); | |
1402 | timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); | |
1403 | } | |
1404 | } | |
1405 | #endif | |
1406 | ||
1407 | env->cp15.c14_pmevcntr_delta[counter] -= | |
1408 | env->cp15.c14_pmevcntr[counter]; | |
1409 | } | |
1410 | } | |
1411 | ||
1412 | void pmu_op_start(CPUARMState *env) | |
1413 | { | |
1414 | unsigned int i; | |
1415 | pmccntr_op_start(env); | |
1416 | for (i = 0; i < pmu_num_counters(env); i++) { | |
1417 | pmevcntr_op_start(env, i); | |
1418 | } | |
1419 | } | |
1420 | ||
1421 | void pmu_op_finish(CPUARMState *env) | |
1422 | { | |
1423 | unsigned int i; | |
1424 | pmccntr_op_finish(env); | |
1425 | for (i = 0; i < pmu_num_counters(env); i++) { | |
1426 | pmevcntr_op_finish(env, i); | |
1427 | } | |
1428 | } | |
1429 | ||
1430 | void pmu_pre_el_change(ARMCPU *cpu, void *ignored) | |
1431 | { | |
1432 | pmu_op_start(&cpu->env); | |
1433 | } | |
1434 | ||
1435 | void pmu_post_el_change(ARMCPU *cpu, void *ignored) | |
1436 | { | |
1437 | pmu_op_finish(&cpu->env); | |
1438 | } | |
1439 | ||
1440 | void arm_pmu_timer_cb(void *opaque) | |
1441 | { | |
1442 | ARMCPU *cpu = opaque; | |
1443 | ||
1444 | /* | |
1445 | * Update all the counter values based on the current underlying counts, | |
1446 | * triggering interrupts to be raised, if necessary. pmu_op_finish() also | |
1447 | * has the effect of setting the cpu->pmu_timer to the next earliest time a | |
1448 | * counter may expire. | |
1449 | */ | |
1450 | pmu_op_start(&cpu->env); | |
1451 | pmu_op_finish(&cpu->env); | |
1452 | } | |
1453 | ||
1454 | static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1455 | uint64_t value) | |
1456 | { | |
1457 | pmu_op_start(env); | |
1458 | ||
1459 | if (value & PMCRC) { | |
1460 | /* The counter has been reset */ | |
1461 | env->cp15.c15_ccnt = 0; | |
1462 | } | |
1463 | ||
1464 | if (value & PMCRP) { | |
1465 | unsigned int i; | |
1466 | for (i = 0; i < pmu_num_counters(env); i++) { | |
1467 | env->cp15.c14_pmevcntr[i] = 0; | |
1468 | } | |
1469 | } | |
1470 | ||
1471 | env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK; | |
1472 | env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK); | |
1473 | ||
1474 | pmu_op_finish(env); | |
1475 | } | |
1476 | ||
1477 | static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1478 | uint64_t value) | |
1479 | { | |
1480 | unsigned int i; | |
1481 | uint64_t overflow_mask, new_pmswinc; | |
1482 | ||
1483 | for (i = 0; i < pmu_num_counters(env); i++) { | |
1484 | /* Increment a counter's count iff: */ | |
1485 | if ((value & (1 << i)) && /* counter's bit is set */ | |
1486 | /* counter is enabled and not filtered */ | |
1487 | pmu_counter_enabled(env, i) && | |
1488 | /* counter is SW_INCR */ | |
1489 | (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) { | |
1490 | pmevcntr_op_start(env, i); | |
1491 | ||
1492 | /* | |
1493 | * Detect if this write causes an overflow since we can't predict | |
1494 | * PMSWINC overflows like we can for other events | |
1495 | */ | |
1496 | new_pmswinc = env->cp15.c14_pmevcntr[i] + 1; | |
1497 | ||
1498 | overflow_mask = pmevcntr_is_64_bit(env, i) ? | |
1499 | 1ULL << 63 : 1ULL << 31; | |
1500 | ||
1501 | if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) { | |
1502 | env->cp15.c9_pmovsr |= (1 << i); | |
1503 | pmu_update_irq(env); | |
1504 | } | |
1505 | ||
1506 | env->cp15.c14_pmevcntr[i] = new_pmswinc; | |
1507 | ||
1508 | pmevcntr_op_finish(env, i); | |
1509 | } | |
1510 | } | |
1511 | } | |
1512 | ||
1513 | static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
1514 | { | |
1515 | uint64_t ret; | |
1516 | pmccntr_op_start(env); | |
1517 | ret = env->cp15.c15_ccnt; | |
1518 | pmccntr_op_finish(env); | |
1519 | return ret; | |
1520 | } | |
1521 | ||
1522 | static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1523 | uint64_t value) | |
1524 | { | |
1525 | /* | |
1526 | * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and | |
1527 | * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the | |
1528 | * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are | |
1529 | * accessed. | |
1530 | */ | |
1531 | env->cp15.c9_pmselr = value & 0x1f; | |
1532 | } | |
1533 | ||
1534 | static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1535 | uint64_t value) | |
1536 | { | |
1537 | pmccntr_op_start(env); | |
1538 | env->cp15.c15_ccnt = value; | |
1539 | pmccntr_op_finish(env); | |
1540 | } | |
1541 | ||
1542 | static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri, | |
1543 | uint64_t value) | |
1544 | { | |
1545 | uint64_t cur_val = pmccntr_read(env, NULL); | |
1546 | ||
1547 | pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value)); | |
1548 | } | |
1549 | ||
1550 | static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1551 | uint64_t value) | |
1552 | { | |
1553 | pmccntr_op_start(env); | |
1554 | env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0; | |
1555 | pmccntr_op_finish(env); | |
1556 | } | |
1557 | ||
1558 | static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri, | |
1559 | uint64_t value) | |
1560 | { | |
1561 | pmccntr_op_start(env); | |
1562 | /* M is not accessible from AArch32 */ | |
1563 | env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) | | |
1564 | (value & PMCCFILTR); | |
1565 | pmccntr_op_finish(env); | |
1566 | } | |
1567 | ||
1568 | static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri) | |
1569 | { | |
1570 | /* M is not visible in AArch32 */ | |
1571 | return env->cp15.pmccfiltr_el0 & PMCCFILTR; | |
1572 | } | |
1573 | ||
1574 | static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1575 | uint64_t value) | |
1576 | { | |
1577 | pmu_op_start(env); | |
1578 | value &= pmu_counter_mask(env); | |
1579 | env->cp15.c9_pmcnten |= value; | |
1580 | pmu_op_finish(env); | |
1581 | } | |
1582 | ||
1583 | static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1584 | uint64_t value) | |
1585 | { | |
1586 | pmu_op_start(env); | |
1587 | value &= pmu_counter_mask(env); | |
1588 | env->cp15.c9_pmcnten &= ~value; | |
1589 | pmu_op_finish(env); | |
1590 | } | |
1591 | ||
1592 | static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1593 | uint64_t value) | |
1594 | { | |
1595 | value &= pmu_counter_mask(env); | |
1596 | env->cp15.c9_pmovsr &= ~value; | |
1597 | pmu_update_irq(env); | |
1598 | } | |
1599 | ||
1600 | static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1601 | uint64_t value) | |
1602 | { | |
1603 | value &= pmu_counter_mask(env); | |
1604 | env->cp15.c9_pmovsr |= value; | |
1605 | pmu_update_irq(env); | |
1606 | } | |
1607 | ||
1608 | static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1609 | uint64_t value, const uint8_t counter) | |
1610 | { | |
1611 | if (counter == 31) { | |
1612 | pmccfiltr_write(env, ri, value); | |
1613 | } else if (counter < pmu_num_counters(env)) { | |
1614 | pmevcntr_op_start(env, counter); | |
1615 | ||
1616 | /* | |
1617 | * If this counter's event type is changing, store the current | |
1618 | * underlying count for the new type in c14_pmevcntr_delta[counter] so | |
1619 | * pmevcntr_op_finish has the correct baseline when it converts back to | |
1620 | * a delta. | |
1621 | */ | |
1622 | uint16_t old_event = env->cp15.c14_pmevtyper[counter] & | |
1623 | PMXEVTYPER_EVTCOUNT; | |
1624 | uint16_t new_event = value & PMXEVTYPER_EVTCOUNT; | |
1625 | if (old_event != new_event) { | |
1626 | uint64_t count = 0; | |
1627 | if (event_supported(new_event)) { | |
1628 | uint16_t event_idx = supported_event_map[new_event]; | |
1629 | count = pm_events[event_idx].get_count(env); | |
1630 | } | |
1631 | env->cp15.c14_pmevcntr_delta[counter] = count; | |
1632 | } | |
1633 | ||
1634 | env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK; | |
1635 | pmevcntr_op_finish(env, counter); | |
1636 | } | |
1637 | /* | |
1638 | * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when | |
1639 | * the PMSELR value is equal to or greater than the number of implemented | |
1640 | * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI. | |
1641 | */ | |
1642 | } | |
1643 | ||
1644 | static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri, | |
1645 | const uint8_t counter) | |
1646 | { | |
1647 | if (counter == 31) { | |
1648 | return env->cp15.pmccfiltr_el0; | |
1649 | } else if (counter < pmu_num_counters(env)) { | |
1650 | return env->cp15.c14_pmevtyper[counter]; | |
1651 | } else { | |
1652 | /* | |
1653 | * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER | |
1654 | * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write(). | |
1655 | */ | |
1656 | return 0; | |
1657 | } | |
1658 | } | |
1659 | ||
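| /* | |
| * The PMEVTYPER<n>/PMEVCNTR<n> register banks encode the counter index | |
| * in the low bits of crm and opc2: counter = crm[1:0]:opc2[2:0]. The | |
| * *_writefn/*_readfn wrappers below decode that index and forward to | |
| * the common helpers above. | |
| */ | |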
1660 | static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri, | |
1661 | uint64_t value) | |
1662 | { | |
1663 | uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); | |
1664 | pmevtyper_write(env, ri, value, counter); | |
1665 | } | |
1666 | ||
1667 | static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, | |
1668 | uint64_t value) | |
1669 | { | |
1670 | uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); | |
1671 | env->cp15.c14_pmevtyper[counter] = value; | |
1672 | ||
1673 | /* | |
1674 | * pmevtyper_rawwrite is called between a pair of pmu_op_start and | |
1675 | * pmu_op_finish calls when loading saved state for a migration. Because | |
1676 | * we're potentially updating the type of event here, the value written to | |
1677 | * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a | |
1678 | * different counter type. Therefore, we need to set this value to the | |
1679 | * current count for the counter type we're writing so that pmu_op_finish | |
1680 | * has the correct count for its calculation. | |
1681 | */ | |
1682 | uint16_t event = value & PMXEVTYPER_EVTCOUNT; | |
1683 | if (event_supported(event)) { | |
1684 | uint16_t event_idx = supported_event_map[event]; | |
1685 | env->cp15.c14_pmevcntr_delta[counter] = | |
1686 | pm_events[event_idx].get_count(env); | |
1687 | } | |
1688 | } | |
1689 | ||
1690 | static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri) | |
1691 | { | |
1692 | uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); | |
1693 | return pmevtyper_read(env, ri, counter); | |
1694 | } | |
1695 | ||
1696 | static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1697 | uint64_t value) | |
1698 | { | |
1699 | pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31); | |
1700 | } | |
1701 | ||
1702 | static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
1703 | { | |
1704 | return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31); | |
1705 | } | |
1706 | ||
1707 | static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1708 | uint64_t value, uint8_t counter) | |
1709 | { | |
1710 | if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { | |
1711 | /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */ | |
1712 | value &= MAKE_64BIT_MASK(0, 32); | |
1713 | } | |
1714 | if (counter < pmu_num_counters(env)) { | |
1715 | pmevcntr_op_start(env, counter); | |
1716 | env->cp15.c14_pmevcntr[counter] = value; | |
1717 | pmevcntr_op_finish(env, counter); | |
1718 | } | |
1719 | /* | |
1720 | * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR | |
1721 | * are CONSTRAINED UNPREDICTABLE. | |
1722 | */ | |
1723 | } | |
1724 | ||
1725 | static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri, | |
1726 | uint8_t counter) | |
1727 | { | |
1728 | if (counter < pmu_num_counters(env)) { | |
1729 | uint64_t ret; | |
1730 | pmevcntr_op_start(env, counter); | |
1731 | ret = env->cp15.c14_pmevcntr[counter]; | |
1732 | pmevcntr_op_finish(env, counter); | |
1733 | if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) { | |
1734 | /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */ | |
1735 | ret &= MAKE_64BIT_MASK(0, 32); | |
1736 | } | |
1737 | return ret; | |
1738 | } else { | |
1739 | /* | |
1740 | * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR | |
1741 | * are CONSTRAINED UNPREDICTABLE. | |
1742 | */ | |
1743 | return 0; | |
1744 | } | |
1745 | } | |
1746 | ||
1747 | static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri, | |
1748 | uint64_t value) | |
1749 | { | |
1750 | uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); | |
1751 | pmevcntr_write(env, ri, value, counter); | |
1752 | } | |
1753 | ||
1754 | static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) | |
1755 | { | |
1756 | uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); | |
1757 | return pmevcntr_read(env, ri, counter); | |
1758 | } | |
1759 | ||
1760 | static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri, | |
1761 | uint64_t value) | |
1762 | { | |
1763 | uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); | |
1764 | assert(counter < pmu_num_counters(env)); | |
1765 | env->cp15.c14_pmevcntr[counter] = value; | |
1766 | pmevcntr_write(env, ri, value, counter); | |
1767 | } | |
1768 | ||
1769 | static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri) | |
1770 | { | |
1771 | uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7); | |
1772 | assert(counter < pmu_num_counters(env)); | |
1773 | return env->cp15.c14_pmevcntr[counter]; | |
1774 | } | |
1775 | ||
1776 | static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1777 | uint64_t value) | |
1778 | { | |
1779 | pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31); | |
1780 | } | |
1781 | ||
1782 | static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
1783 | { | |
1784 | return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31); | |
1785 | } | |
1786 | ||
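| /* | |
| * PMUSERENR gained additional EL0-enable bits in v8, so allow bits | |
| * [3:0] to be written there; earlier architectures only implement the | |
| * EN bit (bit 0). | |
| */ | |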
1787 | static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1788 | uint64_t value) | |
1789 | { | |
1790 | if (arm_feature(env, ARM_FEATURE_V8)) { | |
1791 | env->cp15.c9_pmuserenr = value & 0xf; | |
1792 | } else { | |
1793 | env->cp15.c9_pmuserenr = value & 1; | |
1794 | } | |
1795 | } | |
1796 | ||
1797 | static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1798 | uint64_t value) | |
1799 | { | |
1800 | /* Only bits corresponding to implemented counters (and the C bit) can be set */ | |
1801 | value &= pmu_counter_mask(env); | |
1802 | env->cp15.c9_pminten |= value; | |
1803 | pmu_update_irq(env); | |
1804 | } | |
1805 | ||
1806 | static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1807 | uint64_t value) | |
1808 | { | |
1809 | value &= pmu_counter_mask(env); | |
1810 | env->cp15.c9_pminten &= ~value; | |
1811 | pmu_update_irq(env); | |
1812 | } | |
1813 | ||
1814 | static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1815 | uint64_t value) | |
1816 | { | |
1817 | /* | |
1818 | * Note that even though the AArch64 view of this register has bits | |
1819 | * [10:0] all RES0, we can only mask the bottom 5 bits, to comply with the | |
1820 | * architectural requirements for bits which are RES0 only in some | |
1821 | * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7 | |
1822 | * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.) | |
1823 | */ | |
1824 | raw_write(env, ri, value & ~0x1FULL); | |
1825 | } | |
1826 | ||
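| /* | |
| * SCR/SCR_EL3 write: build the set of writable bits from the features | |
| * this CPU implements, force the RES1/RES0 bits for the current EL3 | |
| * register width, and flush the lower-EL TLBs if the security state | |
| * selection bits (NS/NSE) change. | |
| */ | |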
1827 | static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) | |
1828 | { | |
1829 | /* Begin with base v8.0 state. */ | |
1830 | uint64_t valid_mask = 0x3fff; | |
1831 | ARMCPU *cpu = env_archcpu(env); | |
1832 | uint64_t changed; | |
1833 | ||
1834 | /* | |
1835 | * Because SCR_EL3 is the "real" cpreg and SCR is the alias, reset always | |
1836 | * passes the reginfo for SCR_EL3, which has type ARM_CP_STATE_AA64. | |
1837 | * Instead, choose the format based on the mode of EL3. | |
1838 | */ | |
1839 | if (arm_el_is_aa64(env, 3)) { | |
1840 | value |= SCR_FW | SCR_AW; /* RES1 */ | |
1841 | valid_mask &= ~SCR_NET; /* RES0 */ | |
1842 | ||
1843 | if (!cpu_isar_feature(aa64_aa32_el1, cpu) && | |
1844 | !cpu_isar_feature(aa64_aa32_el2, cpu)) { | |
1845 | value |= SCR_RW; /* RAO/WI */ | |
1846 | } | |
1847 | if (cpu_isar_feature(aa64_ras, cpu)) { | |
1848 | valid_mask |= SCR_TERR; | |
1849 | } | |
1850 | if (cpu_isar_feature(aa64_lor, cpu)) { | |
1851 | valid_mask |= SCR_TLOR; | |
1852 | } | |
1853 | if (cpu_isar_feature(aa64_pauth, cpu)) { | |
1854 | valid_mask |= SCR_API | SCR_APK; | |
1855 | } | |
1856 | if (cpu_isar_feature(aa64_sel2, cpu)) { | |
1857 | valid_mask |= SCR_EEL2; | |
1858 | } else if (cpu_isar_feature(aa64_rme, cpu)) { | |
1859 | /* With RME and without SEL2, NS is RES1 (R_GSWWH, I_DJJQJ). */ | |
1860 | value |= SCR_NS; | |
1861 | } | |
1862 | if (cpu_isar_feature(aa64_mte, cpu)) { | |
1863 | valid_mask |= SCR_ATA; | |
1864 | } | |
1865 | if (cpu_isar_feature(aa64_scxtnum, cpu)) { | |
1866 | valid_mask |= SCR_ENSCXT; | |
1867 | } | |
1868 | if (cpu_isar_feature(aa64_doublefault, cpu)) { | |
1869 | valid_mask |= SCR_EASE | SCR_NMEA; | |
1870 | } | |
1871 | if (cpu_isar_feature(aa64_sme, cpu)) { | |
1872 | valid_mask |= SCR_ENTP2; | |
1873 | } | |
1874 | if (cpu_isar_feature(aa64_hcx, cpu)) { | |
1875 | valid_mask |= SCR_HXEN; | |
1876 | } | |
1877 | if (cpu_isar_feature(aa64_fgt, cpu)) { | |
1878 | valid_mask |= SCR_FGTEN; | |
1879 | } | |
1880 | if (cpu_isar_feature(aa64_rme, cpu)) { | |
1881 | valid_mask |= SCR_NSE | SCR_GPF; | |
1882 | } | |
1883 | } else { | |
1884 | valid_mask &= ~(SCR_RW | SCR_ST); | |
1885 | if (cpu_isar_feature(aa32_ras, cpu)) { | |
1886 | valid_mask |= SCR_TERR; | |
1887 | } | |
1888 | } | |
1889 | ||
1890 | if (!arm_feature(env, ARM_FEATURE_EL2)) { | |
1891 | valid_mask &= ~SCR_HCE; | |
1892 | ||
1893 | /* | |
1894 | * On ARMv7, SMD (or SCD as it is called in v7) is only | |
1895 | * supported if EL2 exists. The bit is UNK/SBZP when EL2 is | |
1896 | * unavailable, so in QEMU's ARMv7 emulation we force it to zero | |
1897 | * in that case. | |
1898 | * On ARMv8, this bit is always available. | |
1899 | */ | |
1900 | if (arm_feature(env, ARM_FEATURE_V7) && | |
1901 | !arm_feature(env, ARM_FEATURE_V8)) { | |
1902 | valid_mask &= ~SCR_SMD; | |
1903 | } | |
1904 | } | |
1905 | ||
1906 | /* Clear all-context RES0 bits. */ | |
1907 | value &= valid_mask; | |
1908 | changed = env->cp15.scr_el3 ^ value; | |
1909 | env->cp15.scr_el3 = value; | |
1910 | ||
1911 | /* | |
1912 | * If SCR_EL3.{NS,NSE} changes, i.e. change of security state, | |
1913 | * we must invalidate all TLBs below EL3. | |
1914 | */ | |
1915 | if (changed & (SCR_NS | SCR_NSE)) { | |
1916 | tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 | | |
1917 | ARMMMUIdxBit_E20_0 | | |
1918 | ARMMMUIdxBit_E10_1 | | |
1919 | ARMMMUIdxBit_E20_2 | | |
1920 | ARMMMUIdxBit_E10_1_PAN | | |
1921 | ARMMMUIdxBit_E20_2_PAN | | |
1922 | ARMMMUIdxBit_E2)); | |
1923 | } | |
1924 | } | |
1925 | ||
1926 | static void scr_reset(CPUARMState *env, const ARMCPRegInfo *ri) | |
1927 | { | |
1928 | /* | |
1929 | * scr_write will set the RES1 bits on an AArch64-only CPU. | |
1930 | * The reset value will be 0x30 on an AArch64-only CPU and 0 otherwise. | |
1931 | */ | |
1932 | scr_write(env, ri, 0); | |
1933 | } | |
1934 | ||
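| /* | |
| * Trap EL1 accesses to the cache ID registers handled below (CCSIDR, | |
| * CSSELR) to EL2 when either HCR_EL2.TID2 or HCR_EL2.TID4 is set. | |
| */ | |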
1935 | static CPAccessResult access_tid4(CPUARMState *env, | |
1936 | const ARMCPRegInfo *ri, | |
1937 | bool isread) | |
1938 | { | |
1939 | if (arm_current_el(env) == 1 && | |
1940 | (arm_hcr_el2_eff(env) & (HCR_TID2 | HCR_TID4))) { | |
1941 | return CP_ACCESS_TRAP_EL2; | |
1942 | } | |
1943 | ||
1944 | return CP_ACCESS_OK; | |
1945 | } | |
1946 | ||
1947 | static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
1948 | { | |
1949 | ARMCPU *cpu = env_archcpu(env); | |
1950 | ||
1951 | /* | |
1952 | * Acquire the CSSELR index from the bank corresponding to the CCSIDR | |
1953 | * bank | |
1954 | */ | |
1955 | uint32_t index = A32_BANKED_REG_GET(env, csselr, | |
1956 | ri->secure & ARM_CP_SECSTATE_S); | |
1957 | ||
1958 | return cpu->ccsidr[index]; | |
1959 | } | |
1960 | ||
1961 | static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
1962 | uint64_t value) | |
1963 | { | |
1964 | raw_write(env, ri, value & 0xf); | |
1965 | } | |
1966 | ||
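| /* | |
| * ISR_EL1 read: report which interrupt types (A/I/F) are pending. At | |
| * EL1 the HCR_EL2.AMO/IMO/FMO routing bits select the corresponding | |
| * virtual interrupt lines (VSERR/VIRQ/VFIQ) instead of the physical | |
| * ones. | |
| */ | |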
1967 | static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
1968 | { | |
1969 | CPUState *cs = env_cpu(env); | |
1970 | bool el1 = arm_current_el(env) == 1; | |
1971 | uint64_t hcr_el2 = el1 ? arm_hcr_el2_eff(env) : 0; | |
1972 | uint64_t ret = 0; | |
1973 | ||
1974 | if (hcr_el2 & HCR_IMO) { | |
1975 | if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { | |
1976 | ret |= CPSR_I; | |
1977 | } | |
1978 | } else { | |
1979 | if (cs->interrupt_request & CPU_INTERRUPT_HARD) { | |
1980 | ret |= CPSR_I; | |
1981 | } | |
1982 | } | |
1983 | ||
1984 | if (hcr_el2 & HCR_FMO) { | |
1985 | if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { | |
1986 | ret |= CPSR_F; | |
1987 | } | |
1988 | } else { | |
1989 | if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { | |
1990 | ret |= CPSR_F; | |
1991 | } | |
1992 | } | |
1993 | ||
1994 | if (hcr_el2 & HCR_AMO) { | |
1995 | if (cs->interrupt_request & CPU_INTERRUPT_VSERR) { | |
1996 | ret |= CPSR_A; | |
1997 | } | |
1998 | } | |
1999 | ||
2000 | return ret; | |
2001 | } | |
2002 | ||
2003 | static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri, | |
2004 | bool isread) | |
2005 | { | |
2006 | if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) { | |
2007 | return CP_ACCESS_TRAP_EL2; | |
2008 | } | |
2009 | ||
2010 | return CP_ACCESS_OK; | |
2011 | } | |
2012 | ||
2013 | static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri, | |
2014 | bool isread) | |
2015 | { | |
2016 | if (arm_feature(env, ARM_FEATURE_V8)) { | |
2017 | return access_aa64_tid1(env, ri, isread); | |
2018 | } | |
2019 | ||
2020 | return CP_ACCESS_OK; | |
2021 | } | |
2022 | ||
2023 | static const ARMCPRegInfo v7_cp_reginfo[] = { | |
2024 | /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */ | |
2025 | { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4, | |
2026 | .access = PL1_W, .type = ARM_CP_NOP }, | |
2027 | /* | |
2028 | * Performance monitors are implementation defined in v7, | |
2029 | * but with an ARM recommended set of registers, which we | |
2030 | * follow. | |
2031 | * | |
2032 | * Performance registers fall into three categories: | |
2033 | * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR) | |
2034 | * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR) | |
2035 | * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others) | |
2036 | * For the cases controlled by PMUSERENR we must set .access to PL0_RW | |
2037 | * or PL0_RO as appropriate and then check PMUSERENR in the helper fn. | |
2038 | */ | |
2039 | { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1, | |
2040 | .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO, | |
2041 | .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), | |
2042 | .writefn = pmcntenset_write, | |
2043 | .accessfn = pmreg_access, | |
2044 | .fgt = FGT_PMCNTEN, | |
2045 | .raw_writefn = raw_write }, | |
2046 | { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO, | |
2047 | .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1, | |
2048 | .access = PL0_RW, .accessfn = pmreg_access, | |
2049 | .fgt = FGT_PMCNTEN, | |
2050 | .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0, | |
2051 | .writefn = pmcntenset_write, .raw_writefn = raw_write }, | |
2052 | { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2, | |
2053 | .access = PL0_RW, | |
2054 | .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten), | |
2055 | .accessfn = pmreg_access, | |
2056 | .fgt = FGT_PMCNTEN, | |
2057 | .writefn = pmcntenclr_write, | |
2058 | .type = ARM_CP_ALIAS | ARM_CP_IO }, | |
2059 | { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64, | |
2060 | .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2, | |
2061 | .access = PL0_RW, .accessfn = pmreg_access, | |
2062 | .fgt = FGT_PMCNTEN, | |
2063 | .type = ARM_CP_ALIAS | ARM_CP_IO, | |
2064 | .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), | |
2065 | .writefn = pmcntenclr_write }, | |
2066 | { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3, | |
2067 | .access = PL0_RW, .type = ARM_CP_IO, | |
2068 | .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), | |
2069 | .accessfn = pmreg_access, | |
2070 | .fgt = FGT_PMOVS, | |
2071 | .writefn = pmovsr_write, | |
2072 | .raw_writefn = raw_write }, | |
2073 | { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64, | |
2074 | .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3, | |
2075 | .access = PL0_RW, .accessfn = pmreg_access, | |
2076 | .fgt = FGT_PMOVS, | |
2077 | .type = ARM_CP_ALIAS | ARM_CP_IO, | |
2078 | .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), | |
2079 | .writefn = pmovsr_write, | |
2080 | .raw_writefn = raw_write }, | |
2081 | { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4, | |
2082 | .access = PL0_W, .accessfn = pmreg_access_swinc, | |
2083 | .fgt = FGT_PMSWINC_EL0, | |
2084 | .type = ARM_CP_NO_RAW | ARM_CP_IO, | |
2085 | .writefn = pmswinc_write }, | |
2086 | { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64, | |
2087 | .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4, | |
2088 | .access = PL0_W, .accessfn = pmreg_access_swinc, | |
2089 | .fgt = FGT_PMSWINC_EL0, | |
2090 | .type = ARM_CP_NO_RAW | ARM_CP_IO, | |
2091 | .writefn = pmswinc_write }, | |
2092 | { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5, | |
2093 | .access = PL0_RW, .type = ARM_CP_ALIAS, | |
2094 | .fgt = FGT_PMSELR_EL0, | |
2095 | .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr), | |
2096 | .accessfn = pmreg_access_selr, .writefn = pmselr_write, | |
2097 | .raw_writefn = raw_write}, | |
2098 | { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64, | |
2099 | .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5, | |
2100 | .access = PL0_RW, .accessfn = pmreg_access_selr, | |
2101 | .fgt = FGT_PMSELR_EL0, | |
2102 | .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), | |
2103 | .writefn = pmselr_write, .raw_writefn = raw_write, }, | |
2104 | { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, | |
2105 | .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, | |
2106 | .fgt = FGT_PMCCNTR_EL0, | |
2107 | .readfn = pmccntr_read, .writefn = pmccntr_write32, | |
2108 | .accessfn = pmreg_access_ccntr }, | |
2109 | { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, | |
2110 | .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, | |
2111 | .access = PL0_RW, .accessfn = pmreg_access_ccntr, | |
2112 | .fgt = FGT_PMCCNTR_EL0, | |
2113 | .type = ARM_CP_IO, | |
2114 | .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt), | |
2115 | .readfn = pmccntr_read, .writefn = pmccntr_write, | |
2116 | .raw_readfn = raw_read, .raw_writefn = raw_write, }, | |
2117 | { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7, | |
2118 | .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32, | |
2119 | .access = PL0_RW, .accessfn = pmreg_access, | |
2120 | .fgt = FGT_PMCCFILTR_EL0, | |
2121 | .type = ARM_CP_ALIAS | ARM_CP_IO, | |
2122 | .resetvalue = 0, }, | |
2123 | { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64, | |
2124 | .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7, | |
2125 | .writefn = pmccfiltr_write, .raw_writefn = raw_write, | |
2126 | .access = PL0_RW, .accessfn = pmreg_access, | |
2127 | .fgt = FGT_PMCCFILTR_EL0, | |
2128 | .type = ARM_CP_IO, | |
2129 | .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0), | |
2130 | .resetvalue = 0, }, | |
2131 | { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1, | |
2132 | .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, | |
2133 | .accessfn = pmreg_access, | |
2134 | .fgt = FGT_PMEVTYPERN_EL0, | |
2135 | .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, | |
2136 | { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64, | |
2137 | .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1, | |
2138 | .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, | |
2139 | .accessfn = pmreg_access, | |
2140 | .fgt = FGT_PMEVTYPERN_EL0, | |
2141 | .writefn = pmxevtyper_write, .readfn = pmxevtyper_read }, | |
2142 | { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2, | |
2143 | .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, | |
2144 | .accessfn = pmreg_access_xevcntr, | |
2145 | .fgt = FGT_PMEVCNTRN_EL0, | |
2146 | .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, | |
2147 | { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64, | |
2148 | .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2, | |
2149 | .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO, | |
2150 | .accessfn = pmreg_access_xevcntr, | |
2151 | .fgt = FGT_PMEVCNTRN_EL0, | |
2152 | .writefn = pmxevcntr_write, .readfn = pmxevcntr_read }, | |
2153 | { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0, | |
2154 | .access = PL0_R | PL1_RW, .accessfn = access_tpm, | |
2155 | .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr), | |
2156 | .resetvalue = 0, | |
2157 | .writefn = pmuserenr_write, .raw_writefn = raw_write }, | |
2158 | { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64, | |
2159 | .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0, | |
2160 | .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS, | |
2161 | .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr), | |
2162 | .resetvalue = 0, | |
2163 | .writefn = pmuserenr_write, .raw_writefn = raw_write }, | |
2164 | { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1, | |
2165 | .access = PL1_RW, .accessfn = access_tpm, | |
2166 | .fgt = FGT_PMINTEN, | |
2167 | .type = ARM_CP_ALIAS | ARM_CP_IO, | |
2168 | .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten), | |
2169 | .resetvalue = 0, | |
2170 | .writefn = pmintenset_write, .raw_writefn = raw_write }, | |
2171 | { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64, | |
2172 | .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1, | |
2173 | .access = PL1_RW, .accessfn = access_tpm, | |
2174 | .fgt = FGT_PMINTEN, | |
2175 | .type = ARM_CP_IO, | |
2176 | .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), | |
2177 | .writefn = pmintenset_write, .raw_writefn = raw_write, | |
2178 | .resetvalue = 0x0 }, | |
2179 | { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2, | |
2180 | .access = PL1_RW, .accessfn = access_tpm, | |
2181 | .fgt = FGT_PMINTEN, | |
2182 | .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW, | |
2183 | .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), | |
2184 | .writefn = pmintenclr_write, }, | |
2185 | { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64, | |
2186 | .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2, | |
2187 | .access = PL1_RW, .accessfn = access_tpm, | |
2188 | .fgt = FGT_PMINTEN, | |
2189 | .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW, | |
2190 | .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten), | |
2191 | .writefn = pmintenclr_write }, | |
2192 | { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH, | |
2193 | .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0, | |
2194 | .access = PL1_R, | |
2195 | .accessfn = access_tid4, | |
2196 | .fgt = FGT_CCSIDR_EL1, | |
2197 | .readfn = ccsidr_read, .type = ARM_CP_NO_RAW }, | |
2198 | { .name = "CSSELR", .state = ARM_CP_STATE_BOTH, | |
2199 | .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0, | |
2200 | .access = PL1_RW, | |
2201 | .accessfn = access_tid4, | |
2202 | .fgt = FGT_CSSELR_EL1, | |
2203 | .writefn = csselr_write, .resetvalue = 0, | |
2204 | .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s), | |
2205 | offsetof(CPUARMState, cp15.csselr_ns) } }, | |
2206 | /* | |
2207 | * Auxiliary ID register: this actually has an IMPDEF value but for now | |
2208 | * just RAZ for all cores: | |
2209 | */ | |
2210 | { .name = "AIDR", .state = ARM_CP_STATE_BOTH, | |
2211 | .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7, | |
2212 | .access = PL1_R, .type = ARM_CP_CONST, | |
2213 | .accessfn = access_aa64_tid1, | |
2214 | .fgt = FGT_AIDR_EL1, | |
2215 | .resetvalue = 0 }, | |
2216 | /* | |
2217 | * Auxiliary fault status registers: these also are IMPDEF, and we | |
2218 | * choose to RAZ/WI for all cores. | |
2219 | */ | |
2220 | { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH, | |
2221 | .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, | |
2222 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
2223 | .fgt = FGT_AFSR0_EL1, | |
2224 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
2225 | { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, | |
2226 | .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, | |
2227 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
2228 | .fgt = FGT_AFSR1_EL1, | |
2229 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
2230 | /* | |
2231 | * MAIR can just read-as-written because we don't implement caches | |
2232 | * and so don't need to care about memory attributes. | |
2233 | */ | |
2234 | { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64, | |
2235 | .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, | |
2236 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
2237 | .fgt = FGT_MAIR_EL1, | |
2238 | .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), | |
2239 | .resetvalue = 0 }, | |
2240 | { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, | |
2241 | .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0, | |
2242 | .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]), | |
2243 | .resetvalue = 0 }, | |
2244 | /* | |
2245 | * For non-long-descriptor page tables these are PRRR and NMRR; | |
2246 | * regardless they still act as reads-as-written for QEMU. | |
2247 | */ | |
2248 | /* | |
2249 | * MAIR0/1 are defined separately from their 64-bit counterpart which | |
2250 | * allows them to assign the correct fieldoffset based on the endianness | |
2251 | * handled in the field definitions. | |
2252 | */ | |
2253 | { .name = "MAIR0", .state = ARM_CP_STATE_AA32, | |
2254 | .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, | |
2255 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
2256 | .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s), | |
2257 | offsetof(CPUARMState, cp15.mair0_ns) }, | |
2258 | .resetfn = arm_cp_reset_ignore }, | |
2259 | { .name = "MAIR1", .state = ARM_CP_STATE_AA32, | |
2260 | .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, | |
2261 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
2262 | .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s), | |
2263 | offsetof(CPUARMState, cp15.mair1_ns) }, | |
2264 | .resetfn = arm_cp_reset_ignore }, | |
2265 | { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH, | |
2266 | .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0, | |
2267 | .fgt = FGT_ISR_EL1, | |
2268 | .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read }, | |
2269 | /* 32 bit ITLB invalidates */ | |
2270 | { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0, | |
2271 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, | |
2272 | .writefn = tlbiall_write }, | |
2273 | { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, | |
2274 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, | |
2275 | .writefn = tlbimva_write }, | |
2276 | { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2, | |
2277 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, | |
2278 | .writefn = tlbiasid_write }, | |
2279 | /* 32 bit DTLB invalidates */ | |
2280 | { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0, | |
2281 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, | |
2282 | .writefn = tlbiall_write }, | |
2283 | { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, | |
2284 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, | |
2285 | .writefn = tlbimva_write }, | |
2286 | { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2, | |
2287 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, | |
2288 | .writefn = tlbiasid_write }, | |
2289 | /* 32 bit TLB invalidates */ | |
2290 | { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, | |
2291 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, | |
2292 | .writefn = tlbiall_write }, | |
2293 | { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, | |
2294 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, | |
2295 | .writefn = tlbimva_write }, | |
2296 | { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, | |
2297 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, | |
2298 | .writefn = tlbiasid_write }, | |
2299 | { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, | |
2300 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, | |
2301 | .writefn = tlbimvaa_write }, | |
2302 | }; | |
2303 | ||
2304 | static const ARMCPRegInfo v7mp_cp_reginfo[] = { | |
2305 | /* 32 bit TLB invalidates, Inner Shareable */ | |
2306 | { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, | |
2307 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, | |
2308 | .writefn = tlbiall_is_write }, | |
2309 | { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, | |
2310 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, | |
2311 | .writefn = tlbimva_is_write }, | |
2312 | { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, | |
2313 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, | |
2314 | .writefn = tlbiasid_is_write }, | |
2315 | { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, | |
2316 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, | |
2317 | .writefn = tlbimvaa_is_write }, | |
2318 | }; | |
2319 | ||
2320 | static const ARMCPRegInfo pmovsset_cp_reginfo[] = { | |
2321 | /* PMOVSSET is not implemented in v7 before v7ve */ | |
2322 | { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3, | |
2323 | .access = PL0_RW, .accessfn = pmreg_access, | |
2324 | .fgt = FGT_PMOVS, | |
2325 | .type = ARM_CP_ALIAS | ARM_CP_IO, | |
2326 | .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr), | |
2327 | .writefn = pmovsset_write, | |
2328 | .raw_writefn = raw_write }, | |
2329 | { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64, | |
2330 | .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3, | |
2331 | .access = PL0_RW, .accessfn = pmreg_access, | |
2332 | .fgt = FGT_PMOVS, | |
2333 | .type = ARM_CP_ALIAS | ARM_CP_IO, | |
2334 | .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr), | |
2335 | .writefn = pmovsset_write, | |
2336 | .raw_writefn = raw_write }, | |
2337 | }; | |
2338 | ||
2339 | static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2340 | uint64_t value) | |
2341 | { | |
2342 | value &= 1; | |
2343 | env->teecr = value; | |
2344 | } | |
2345 | ||
2346 | static CPAccessResult teecr_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
2347 | bool isread) | |
2348 | { | |
2349 | /* | |
2350 | * HSTR.TTEE only exists in v7A, not v8A, but v8A doesn't have T2EE | |
2351 | * at all, so we don't need to check whether we're v8A. | |
2352 | */ | |
2353 | if (arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) && | |
2354 | (env->cp15.hstr_el2 & HSTR_TTEE)) { | |
2355 | return CP_ACCESS_TRAP_EL2; | |
2356 | } | |
2357 | return CP_ACCESS_OK; | |
2358 | } | |
2359 | ||
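| /* | |
| * TEEHBR is additionally inaccessible from EL0 when TEECR bit 0 is | |
| * set; otherwise the same HSTR.TTEE trapping as for TEECR applies. | |
| */ | |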
2360 | static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
2361 | bool isread) | |
2362 | { | |
2363 | if (arm_current_el(env) == 0 && (env->teecr & 1)) { | |
2364 | return CP_ACCESS_TRAP; | |
2365 | } | |
2366 | return teecr_access(env, ri, isread); | |
2367 | } | |
2368 | ||
2369 | static const ARMCPRegInfo t2ee_cp_reginfo[] = { | |
2370 | { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0, | |
2371 | .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr), | |
2372 | .resetvalue = 0, | |
2373 | .writefn = teecr_write, .accessfn = teecr_access }, | |
2374 | { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0, | |
2375 | .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr), | |
2376 | .accessfn = teehbr_access, .resetvalue = 0 }, | |
2377 | }; | |
2378 | ||
2379 | static const ARMCPRegInfo v6k_cp_reginfo[] = { | |
2380 | { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64, | |
2381 | .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0, | |
2382 | .access = PL0_RW, | |
2383 | .fgt = FGT_TPIDR_EL0, | |
2384 | .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 }, | |
2385 | { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2, | |
2386 | .access = PL0_RW, | |
2387 | .fgt = FGT_TPIDR_EL0, | |
2388 | .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s), | |
2389 | offsetoflow32(CPUARMState, cp15.tpidrurw_ns) }, | |
2390 | .resetfn = arm_cp_reset_ignore }, | |
2391 | { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64, | |
2392 | .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0, | |
2393 | .access = PL0_R | PL1_W, | |
2394 | .fgt = FGT_TPIDRRO_EL0, | |
2395 | .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]), | |
2396 | .resetvalue = 0}, | |
2397 | { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3, | |
2398 | .access = PL0_R | PL1_W, | |
2399 | .fgt = FGT_TPIDRRO_EL0, | |
2400 | .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s), | |
2401 | offsetoflow32(CPUARMState, cp15.tpidruro_ns) }, | |
2402 | .resetfn = arm_cp_reset_ignore }, | |
2403 | { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64, | |
2404 | .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0, | |
2405 | .access = PL1_RW, | |
2406 | .fgt = FGT_TPIDR_EL1, | |
2407 | .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 }, | |
2408 | { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4, | |
2409 | .access = PL1_RW, | |
2410 | .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s), | |
2411 | offsetoflow32(CPUARMState, cp15.tpidrprw_ns) }, | |
2412 | .resetvalue = 0 }, | |
2413 | }; | |
2414 | ||
2415 | #ifndef CONFIG_USER_ONLY | |
2416 | ||
2417 | static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
2418 | bool isread) | |
2419 | { | |
2420 | /* | |
2421 | * CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero. | |
2422 | * Writable only at the highest implemented exception level. | |
2423 | */ | |
2424 | int el = arm_current_el(env); | |
2425 | uint64_t hcr; | |
2426 | uint32_t cntkctl; | |
2427 | ||
2428 | switch (el) { | |
2429 | case 0: | |
2430 | hcr = arm_hcr_el2_eff(env); | |
2431 | if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { | |
2432 | cntkctl = env->cp15.cnthctl_el2; | |
2433 | } else { | |
2434 | cntkctl = env->cp15.c14_cntkctl; | |
2435 | } | |
2436 | if (!extract32(cntkctl, 0, 2)) { | |
2437 | return CP_ACCESS_TRAP; | |
2438 | } | |
2439 | break; | |
2440 | case 1: | |
2441 | if (!isread && ri->state == ARM_CP_STATE_AA32 && | |
2442 | arm_is_secure_below_el3(env)) { | |
2443 | /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */ | |
2444 | return CP_ACCESS_TRAP_UNCATEGORIZED; | |
2445 | } | |
2446 | break; | |
2447 | case 2: | |
2448 | case 3: | |
2449 | break; | |
2450 | } | |
2451 | ||
2452 | if (!isread && el < arm_highest_el(env)) { | |
2453 | return CP_ACCESS_TRAP_UNCATEGORIZED; | |
2454 | } | |
2455 | ||
2456 | return CP_ACCESS_OK; | |
2457 | } | |
2458 | ||
2459 | static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx, | |
2460 | bool isread) | |
2461 | { | |
2462 | unsigned int cur_el = arm_current_el(env); | |
2463 | bool has_el2 = arm_is_el2_enabled(env); | |
2464 | uint64_t hcr = arm_hcr_el2_eff(env); | |
2465 | ||
2466 | switch (cur_el) { | |
2467 | case 0: | |
2468 | /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]CTEN. */ | |
2469 | if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { | |
2470 | return (extract32(env->cp15.cnthctl_el2, timeridx, 1) | |
2471 | ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); | |
2472 | } | |
2473 | ||
2474 | /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */ | |
2475 | if (!extract32(env->cp15.c14_cntkctl, timeridx, 1)) { | |
2476 | return CP_ACCESS_TRAP; | |
2477 | } | |
2478 | ||
2479 | /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PCTEN. */ | |
2480 | if (hcr & HCR_E2H) { | |
2481 | if (timeridx == GTIMER_PHYS && | |
2482 | !extract32(env->cp15.cnthctl_el2, 10, 1)) { | |
2483 | return CP_ACCESS_TRAP_EL2; | |
2484 | } | |
2485 | } else { | |
2486 | /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */ | |
2487 | if (has_el2 && timeridx == GTIMER_PHYS && | |
2488 | !extract32(env->cp15.cnthctl_el2, 1, 1)) { | |
2489 | return CP_ACCESS_TRAP_EL2; | |
2490 | } | |
2491 | } | |
2492 | break; | |
2493 | ||
2494 | case 1: | |
2495 | /* Check CNTHCTL_EL2.EL1PCTEN, which changes location based on E2H. */ | |
2496 | if (has_el2 && timeridx == GTIMER_PHYS && | |
2497 | (hcr & HCR_E2H | |
2498 | ? !extract32(env->cp15.cnthctl_el2, 10, 1) | |
2499 | : !extract32(env->cp15.cnthctl_el2, 0, 1))) { | |
2500 | return CP_ACCESS_TRAP_EL2; | |
2501 | } | |
2502 | break; | |
2503 | } | |
2504 | return CP_ACCESS_OK; | |
2505 | } | |
2506 | ||
2507 | static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx, | |
2508 | bool isread) | |
2509 | { | |
2510 | unsigned int cur_el = arm_current_el(env); | |
2511 | bool has_el2 = arm_is_el2_enabled(env); | |
2512 | uint64_t hcr = arm_hcr_el2_eff(env); | |
2513 | ||
2514 | switch (cur_el) { | |
2515 | case 0: | |
2516 | if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { | |
2517 | /* If HCR_EL2.<E2H,TGE> == '11': check CNTHCTL_EL2.EL0[PV]TEN. */ | |
2518 | return (extract32(env->cp15.cnthctl_el2, 9 - timeridx, 1) | |
2519 | ? CP_ACCESS_OK : CP_ACCESS_TRAP_EL2); | |
2520 | } | |
2521 | ||
2522 | /* | |
2523 | * CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from | |
2524 | * EL0 if EL0[PV]TEN is zero. | |
2525 | */ | |
2526 | if (!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) { | |
2527 | return CP_ACCESS_TRAP; | |
2528 | } | |
2529 | /* fall through */ | |
2530 | ||
2531 | case 1: | |
2532 | if (has_el2 && timeridx == GTIMER_PHYS) { | |
2533 | if (hcr & HCR_E2H) { | |
2534 | /* If HCR_EL2.<E2H,TGE> == '10': check CNTHCTL_EL2.EL1PTEN. */ | |
2535 | if (!extract32(env->cp15.cnthctl_el2, 11, 1)) { | |
2536 | return CP_ACCESS_TRAP_EL2; | |
2537 | } | |
2538 | } else { | |
2539 | /* If HCR_EL2.<E2H> == 0: check CNTHCTL_EL2.EL1PCEN. */ | |
2540 | if (!extract32(env->cp15.cnthctl_el2, 1, 1)) { | |
2541 | return CP_ACCESS_TRAP_EL2; | |
2542 | } | |
2543 | } | |
2544 | } | |
2545 | break; | |
2546 | } | |
2547 | return CP_ACCESS_OK; | |
2548 | } | |
2549 | ||
2550 | static CPAccessResult gt_pct_access(CPUARMState *env, | |
2551 | const ARMCPRegInfo *ri, | |
2552 | bool isread) | |
2553 | { | |
2554 | return gt_counter_access(env, GTIMER_PHYS, isread); | |
2555 | } | |
2556 | ||
2557 | static CPAccessResult gt_vct_access(CPUARMState *env, | |
2558 | const ARMCPRegInfo *ri, | |
2559 | bool isread) | |
2560 | { | |
2561 | return gt_counter_access(env, GTIMER_VIRT, isread); | |
2562 | } | |
2563 | ||
2564 | static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
2565 | bool isread) | |
2566 | { | |
2567 | return gt_timer_access(env, GTIMER_PHYS, isread); | |
2568 | } | |
2569 | ||
2570 | static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
2571 | bool isread) | |
2572 | { | |
2573 | return gt_timer_access(env, GTIMER_VIRT, isread); | |
2574 | } | |
2575 | ||
2576 | static CPAccessResult gt_stimer_access(CPUARMState *env, | |
2577 | const ARMCPRegInfo *ri, | |
2578 | bool isread) | |
2579 | { | |
2580 | /* | |
2581 | * The AArch64 register view of the secure physical timer is | |
2582 | * always accessible from EL3, and configurably accessible from | |
2583 | * Secure EL1. | |
2584 | */ | |
2585 | switch (arm_current_el(env)) { | |
2586 | case 1: | |
2587 | if (!arm_is_secure(env)) { | |
2588 | return CP_ACCESS_TRAP; | |
2589 | } | |
2590 | if (!(env->cp15.scr_el3 & SCR_ST)) { | |
2591 | return CP_ACCESS_TRAP_EL3; | |
2592 | } | |
2593 | return CP_ACCESS_OK; | |
2594 | case 0: | |
2595 | case 2: | |
2596 | return CP_ACCESS_TRAP; | |
2597 | case 3: | |
2598 | return CP_ACCESS_OK; | |
2599 | default: | |
2600 | g_assert_not_reached(); | |
2601 | } | |
2602 | } | |
2603 | ||
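| /* | |
| * The generic timer counter is derived from QEMU_CLOCK_VIRTUAL: the | |
| * current time in ns divided by the tick period implied by CNTFRQ. | |
| */ | |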
2604 | static uint64_t gt_get_countervalue(CPUARMState *env) | |
2605 | { | |
2606 | ARMCPU *cpu = env_archcpu(env); | |
2607 | ||
2608 | return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / gt_cntfrq_period_ns(cpu); | |
2609 | } | |
2610 | ||
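| /* | |
| * Recompute a timer's ISTATUS from the current counter value, drive its | |
| * interrupt output, and (re)arm the backing QEMUTimer for the next | |
| * point at which ISTATUS can change; called when CVAL, CTL or the | |
| * virtual offset changes and from the per-timer expiry callbacks. | |
| */ | |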
2611 | static void gt_recalc_timer(ARMCPU *cpu, int timeridx) | |
2612 | { | |
2613 | ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx]; | |
2614 | ||
2615 | if (gt->ctl & 1) { | |
2616 | /* | |
2617 | * Timer enabled: calculate and set current ISTATUS, irq, and | |
2618 | * reset timer to when ISTATUS next has to change | |
2619 | */ | |
2620 | uint64_t offset = timeridx == GTIMER_VIRT ? | |
2621 | cpu->env.cp15.cntvoff_el2 : 0; | |
2622 | uint64_t count = gt_get_countervalue(&cpu->env); | |
2623 | /* Note that this must be unsigned 64 bit arithmetic: */ | |
2624 | int istatus = count - offset >= gt->cval; | |
2625 | uint64_t nexttick; | |
2626 | int irqstate; | |
2627 | ||
2628 | gt->ctl = deposit32(gt->ctl, 2, 1, istatus); | |
2629 | ||
2630 | irqstate = (istatus && !(gt->ctl & 2)); | |
2631 | qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); | |
2632 | ||
2633 | if (istatus) { | |
2634 | /* Next transition is when count rolls back over to zero */ | |
2635 | nexttick = UINT64_MAX; | |
2636 | } else { | |
2637 | /* Next transition is when we hit cval */ | |
2638 | nexttick = gt->cval + offset; | |
2639 | } | |
2640 | /* | |
2641 | * Note that the desired next expiry time might be beyond the | |
2642 | * signed-64-bit range of a QEMUTimer -- in this case we just | |
2643 | * set the timer for as far in the future as possible. When the | |
2644 | * timer expires we will reset the timer for any remaining period. | |
2645 | */ | |
2646 | if (nexttick > INT64_MAX / gt_cntfrq_period_ns(cpu)) { | |
2647 | timer_mod_ns(cpu->gt_timer[timeridx], INT64_MAX); | |
2648 | } else { | |
2649 | timer_mod(cpu->gt_timer[timeridx], nexttick); | |
2650 | } | |
2651 | trace_arm_gt_recalc(timeridx, irqstate, nexttick); | |
2652 | } else { | |
2653 | /* Timer disabled: ISTATUS and timer output always clear */ | |
2654 | gt->ctl &= ~4; | |
2655 | qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0); | |
2656 | timer_del(cpu->gt_timer[timeridx]); | |
2657 | trace_arm_gt_recalc_disabled(timeridx); | |
2658 | } | |
2659 | } | |
2660 | ||
2661 | static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, | |
2662 | int timeridx) | |
2663 | { | |
2664 | ARMCPU *cpu = env_archcpu(env); | |
2665 | ||
2666 | timer_del(cpu->gt_timer[timeridx]); | |
2667 | } | |
2668 | ||
2669 | static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
2670 | { | |
2671 | return gt_get_countervalue(env); | |
2672 | } | |
2673 | ||
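| /* | |
| * Offset applied to the virtual counter view: zero at EL2 when | |
| * HCR_EL2.E2H is set, and at EL0 when HCR_EL2.{E2H,TGE} are both set; | |
| * otherwise CNTVOFF_EL2 is subtracted from the physical count. | |
| */ | |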
2674 | static uint64_t gt_virt_cnt_offset(CPUARMState *env) | |
2675 | { | |
2676 | uint64_t hcr; | |
2677 | ||
2678 | switch (arm_current_el(env)) { | |
2679 | case 2: | |
2680 | hcr = arm_hcr_el2_eff(env); | |
2681 | if (hcr & HCR_E2H) { | |
2682 | return 0; | |
2683 | } | |
2684 | break; | |
2685 | case 0: | |
2686 | hcr = arm_hcr_el2_eff(env); | |
2687 | if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { | |
2688 | return 0; | |
2689 | } | |
2690 | break; | |
2691 | } | |
2692 | ||
2693 | return env->cp15.cntvoff_el2; | |
2694 | } | |
2695 | ||
2696 | static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
2697 | { | |
2698 | return gt_get_countervalue(env) - gt_virt_cnt_offset(env); | |
2699 | } | |
2700 | ||
2701 | static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2702 | int timeridx, | |
2703 | uint64_t value) | |
2704 | { | |
2705 | trace_arm_gt_cval_write(timeridx, value); | |
2706 | env->cp15.c14_timer[timeridx].cval = value; | |
2707 | gt_recalc_timer(env_archcpu(env), timeridx); | |
2708 | } | |
2709 | ||
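| /* | |
| * TVAL is the signed 32-bit difference between CVAL and the current | |
| * (offset-adjusted, for the virtual timers) counter value: reads | |
| * return CVAL - count, writes set CVAL = count + sign-extended TVAL. | |
| */ | |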
2710 | static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, | |
2711 | int timeridx) | |
2712 | { | |
2713 | uint64_t offset = 0; | |
2714 | ||
2715 | switch (timeridx) { | |
2716 | case GTIMER_VIRT: | |
2717 | case GTIMER_HYPVIRT: | |
2718 | offset = gt_virt_cnt_offset(env); | |
2719 | break; | |
2720 | } | |
2721 | ||
2722 | return (uint32_t)(env->cp15.c14_timer[timeridx].cval - | |
2723 | (gt_get_countervalue(env) - offset)); | |
2724 | } | |
2725 | ||
2726 | static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2727 | int timeridx, | |
2728 | uint64_t value) | |
2729 | { | |
2730 | uint64_t offset = 0; | |
2731 | ||
2732 | switch (timeridx) { | |
2733 | case GTIMER_VIRT: | |
2734 | case GTIMER_HYPVIRT: | |
2735 | offset = gt_virt_cnt_offset(env); | |
2736 | break; | |
2737 | } | |
2738 | ||
2739 | trace_arm_gt_tval_write(timeridx, value); | |
2740 | env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + | |
2741 | sextract64(value, 0, 32); | |
2742 | gt_recalc_timer(env_archcpu(env), timeridx); | |
2743 | } | |
2744 | ||
2745 | static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2746 | int timeridx, | |
2747 | uint64_t value) | |
2748 | { | |
2749 | ARMCPU *cpu = env_archcpu(env); | |
2750 | uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; | |
2751 | ||
2752 | trace_arm_gt_ctl_write(timeridx, value); | |
2753 | env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value); | |
2754 | if ((oldval ^ value) & 1) { | |
2755 | /* Enable toggled */ | |
2756 | gt_recalc_timer(cpu, timeridx); | |
2757 | } else if ((oldval ^ value) & 2) { | |
2758 | /* | |
2759 | * IMASK toggled: don't need to recalculate, | |
2760 | * just set the interrupt line based on ISTATUS | |
2761 | */ | |
2762 | int irqstate = (oldval & 4) && !(value & 2); | |
2763 | ||
2764 | trace_arm_gt_imask_toggle(timeridx, irqstate); | |
2765 | qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate); | |
2766 | } | |
2767 | } | |
2768 | ||
2769 | static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) | |
2770 | { | |
2771 | gt_timer_reset(env, ri, GTIMER_PHYS); | |
2772 | } | |
2773 | ||
2774 | static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2775 | uint64_t value) | |
2776 | { | |
2777 | gt_cval_write(env, ri, GTIMER_PHYS, value); | |
2778 | } | |
2779 | ||
2780 | static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
2781 | { | |
2782 | return gt_tval_read(env, ri, GTIMER_PHYS); | |
2783 | } | |
2784 | ||
2785 | static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2786 | uint64_t value) | |
2787 | { | |
2788 | gt_tval_write(env, ri, GTIMER_PHYS, value); | |
2789 | } | |
2790 | ||
2791 | static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2792 | uint64_t value) | |
2793 | { | |
2794 | gt_ctl_write(env, ri, GTIMER_PHYS, value); | |
2795 | } | |
2796 | ||
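| /* | |
| * When executing in an E2&0 translation regime (HCR_EL2.E2H, i.e. VHE) | |
| * accesses to the EL1 physical and virtual timer registers are | |
| * redirected to the EL2 timers; these helpers select the backing | |
| * timer index accordingly. | |
| */ | |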
2797 | static int gt_phys_redir_timeridx(CPUARMState *env) | |
2798 | { | |
2799 | switch (arm_mmu_idx(env)) { | |
2800 | case ARMMMUIdx_E20_0: | |
2801 | case ARMMMUIdx_E20_2: | |
2802 | case ARMMMUIdx_E20_2_PAN: | |
2803 | return GTIMER_HYP; | |
2804 | default: | |
2805 | return GTIMER_PHYS; | |
2806 | } | |
2807 | } | |
2808 | ||
2809 | static int gt_virt_redir_timeridx(CPUARMState *env) | |
2810 | { | |
2811 | switch (arm_mmu_idx(env)) { | |
2812 | case ARMMMUIdx_E20_0: | |
2813 | case ARMMMUIdx_E20_2: | |
2814 | case ARMMMUIdx_E20_2_PAN: | |
2815 | return GTIMER_HYPVIRT; | |
2816 | default: | |
2817 | return GTIMER_VIRT; | |
2818 | } | |
2819 | } | |
2820 | ||
2821 | static uint64_t gt_phys_redir_cval_read(CPUARMState *env, | |
2822 | const ARMCPRegInfo *ri) | |
2823 | { | |
2824 | int timeridx = gt_phys_redir_timeridx(env); | |
2825 | return env->cp15.c14_timer[timeridx].cval; | |
2826 | } | |
2827 | ||
2828 | static void gt_phys_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2829 | uint64_t value) | |
2830 | { | |
2831 | int timeridx = gt_phys_redir_timeridx(env); | |
2832 | gt_cval_write(env, ri, timeridx, value); | |
2833 | } | |
2834 | ||
2835 | static uint64_t gt_phys_redir_tval_read(CPUARMState *env, | |
2836 | const ARMCPRegInfo *ri) | |
2837 | { | |
2838 | int timeridx = gt_phys_redir_timeridx(env); | |
2839 | return gt_tval_read(env, ri, timeridx); | |
2840 | } | |
2841 | ||
2842 | static void gt_phys_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2843 | uint64_t value) | |
2844 | { | |
2845 | int timeridx = gt_phys_redir_timeridx(env); | |
2846 | gt_tval_write(env, ri, timeridx, value); | |
2847 | } | |
2848 | ||
2849 | static uint64_t gt_phys_redir_ctl_read(CPUARMState *env, | |
2850 | const ARMCPRegInfo *ri) | |
2851 | { | |
2852 | int timeridx = gt_phys_redir_timeridx(env); | |
2853 | return env->cp15.c14_timer[timeridx].ctl; | |
2854 | } | |
2855 | ||
2856 | static void gt_phys_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2857 | uint64_t value) | |
2858 | { | |
2859 | int timeridx = gt_phys_redir_timeridx(env); | |
2860 | gt_ctl_write(env, ri, timeridx, value); | |
2861 | } | |
2862 | ||
2863 | static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) | |
2864 | { | |
2865 | gt_timer_reset(env, ri, GTIMER_VIRT); | |
2866 | } | |
2867 | ||
2868 | static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2869 | uint64_t value) | |
2870 | { | |
2871 | gt_cval_write(env, ri, GTIMER_VIRT, value); | |
2872 | } | |
2873 | ||
2874 | static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
2875 | { | |
2876 | return gt_tval_read(env, ri, GTIMER_VIRT); | |
2877 | } | |
2878 | ||
2879 | static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2880 | uint64_t value) | |
2881 | { | |
2882 | gt_tval_write(env, ri, GTIMER_VIRT, value); | |
2883 | } | |
2884 | ||
2885 | static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2886 | uint64_t value) | |
2887 | { | |
2888 | gt_ctl_write(env, ri, GTIMER_VIRT, value); | |
2889 | } | |
2890 | ||
2891 | static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2892 | uint64_t value) | |
2893 | { | |
2894 | ARMCPU *cpu = env_archcpu(env); | |
2895 | ||
2896 | trace_arm_gt_cntvoff_write(value); | |
2897 | raw_write(env, ri, value); | |
2898 | gt_recalc_timer(cpu, GTIMER_VIRT); | |
2899 | } | |
2900 | ||
2901 | static uint64_t gt_virt_redir_cval_read(CPUARMState *env, | |
2902 | const ARMCPRegInfo *ri) | |
2903 | { | |
2904 | int timeridx = gt_virt_redir_timeridx(env); | |
2905 | return env->cp15.c14_timer[timeridx].cval; | |
2906 | } | |
2907 | ||
2908 | static void gt_virt_redir_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2909 | uint64_t value) | |
2910 | { | |
2911 | int timeridx = gt_virt_redir_timeridx(env); | |
2912 | gt_cval_write(env, ri, timeridx, value); | |
2913 | } | |
2914 | ||
2915 | static uint64_t gt_virt_redir_tval_read(CPUARMState *env, | |
2916 | const ARMCPRegInfo *ri) | |
2917 | { | |
2918 | int timeridx = gt_virt_redir_timeridx(env); | |
2919 | return gt_tval_read(env, ri, timeridx); | |
2920 | } | |
2921 | ||
2922 | static void gt_virt_redir_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2923 | uint64_t value) | |
2924 | { | |
2925 | int timeridx = gt_virt_redir_timeridx(env); | |
2926 | gt_tval_write(env, ri, timeridx, value); | |
2927 | } | |
2928 | ||
2929 | static uint64_t gt_virt_redir_ctl_read(CPUARMState *env, | |
2930 | const ARMCPRegInfo *ri) | |
2931 | { | |
2932 | int timeridx = gt_virt_redir_timeridx(env); | |
2933 | return env->cp15.c14_timer[timeridx].ctl; | |
2934 | } | |
2935 | ||
2936 | static void gt_virt_redir_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2937 | uint64_t value) | |
2938 | { | |
2939 | int timeridx = gt_virt_redir_timeridx(env); | |
2940 | gt_ctl_write(env, ri, timeridx, value); | |
2941 | } | |
2942 | ||
2943 | static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) | |
2944 | { | |
2945 | gt_timer_reset(env, ri, GTIMER_HYP); | |
2946 | } | |
2947 | ||
2948 | static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2949 | uint64_t value) | |
2950 | { | |
2951 | gt_cval_write(env, ri, GTIMER_HYP, value); | |
2952 | } | |
2953 | ||
2954 | static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
2955 | { | |
2956 | return gt_tval_read(env, ri, GTIMER_HYP); | |
2957 | } | |
2958 | ||
2959 | static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2960 | uint64_t value) | |
2961 | { | |
2962 | gt_tval_write(env, ri, GTIMER_HYP, value); | |
2963 | } | |
2964 | ||
2965 | static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2966 | uint64_t value) | |
2967 | { | |
2968 | gt_ctl_write(env, ri, GTIMER_HYP, value); | |
2969 | } | |
2970 | ||
2971 | static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) | |
2972 | { | |
2973 | gt_timer_reset(env, ri, GTIMER_SEC); | |
2974 | } | |
2975 | ||
2976 | static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2977 | uint64_t value) | |
2978 | { | |
2979 | gt_cval_write(env, ri, GTIMER_SEC, value); | |
2980 | } | |
2981 | ||
2982 | static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
2983 | { | |
2984 | return gt_tval_read(env, ri, GTIMER_SEC); | |
2985 | } | |
2986 | ||
2987 | static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2988 | uint64_t value) | |
2989 | { | |
2990 | gt_tval_write(env, ri, GTIMER_SEC, value); | |
2991 | } | |
2992 | ||
2993 | static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
2994 | uint64_t value) | |
2995 | { | |
2996 | gt_ctl_write(env, ri, GTIMER_SEC, value); | |
2997 | } | |
2998 | ||
2999 | static void gt_hv_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri) | |
3000 | { | |
3001 | gt_timer_reset(env, ri, GTIMER_HYPVIRT); | |
3002 | } | |
3003 | ||
3004 | static void gt_hv_cval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3005 | uint64_t value) | |
3006 | { | |
3007 | gt_cval_write(env, ri, GTIMER_HYPVIRT, value); | |
3008 | } | |
3009 | ||
3010 | static uint64_t gt_hv_tval_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
3011 | { | |
3012 | return gt_tval_read(env, ri, GTIMER_HYPVIRT); | |
3013 | } | |
3014 | ||
3015 | static void gt_hv_tval_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3016 | uint64_t value) | |
3017 | { | |
3018 | gt_tval_write(env, ri, GTIMER_HYPVIRT, value); | |
3019 | } | |
3020 | ||
3021 | static void gt_hv_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3022 | uint64_t value) | |
3023 | { | |
3024 | gt_ctl_write(env, ri, GTIMER_HYPVIRT, value); | |
3025 | } | |
3026 | ||
3027 | void arm_gt_ptimer_cb(void *opaque) | |
3028 | { | |
3029 | ARMCPU *cpu = opaque; | |
3030 | ||
3031 | gt_recalc_timer(cpu, GTIMER_PHYS); | |
3032 | } | |
3033 | ||
3034 | void arm_gt_vtimer_cb(void *opaque) | |
3035 | { | |
3036 | ARMCPU *cpu = opaque; | |
3037 | ||
3038 | gt_recalc_timer(cpu, GTIMER_VIRT); | |
3039 | } | |
3040 | ||
3041 | void arm_gt_htimer_cb(void *opaque) | |
3042 | { | |
3043 | ARMCPU *cpu = opaque; | |
3044 | ||
3045 | gt_recalc_timer(cpu, GTIMER_HYP); | |
3046 | } | |
3047 | ||
3048 | void arm_gt_stimer_cb(void *opaque) | |
3049 | { | |
3050 | ARMCPU *cpu = opaque; | |
3051 | ||
3052 | gt_recalc_timer(cpu, GTIMER_SEC); | |
3053 | } | |
3054 | ||
3055 | void arm_gt_hvtimer_cb(void *opaque) | |
3056 | { | |
3057 | ARMCPU *cpu = opaque; | |
3058 | ||
3059 | gt_recalc_timer(cpu, GTIMER_HYPVIRT); | |
3060 | } | |
3061 | ||
3062 | static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque) | |
3063 | { | |
3064 | ARMCPU *cpu = env_archcpu(env); | |
3065 | ||
3066 | cpu->env.cp15.c14_cntfrq = cpu->gt_cntfrq_hz; | |
3067 | } | |
3068 | ||
3069 | static const ARMCPRegInfo generic_timer_cp_reginfo[] = { | |
3070 | /* | |
3071 | * Note that CNTFRQ is purely reads-as-written for the benefit | |
3072 | * of software; writing it doesn't actually change the timer frequency. | |
3073 | * Our reset value matches the fixed frequency we implement the timer at. | |
3074 | */ | |
3075 | { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0, | |
3076 | .type = ARM_CP_ALIAS, | |
3077 | .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, | |
3078 | .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq), | |
3079 | }, | |
3080 | { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, | |
3081 | .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, | |
3082 | .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access, | |
3083 | .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), | |
3084 | .resetfn = arm_gt_cntfrq_reset, | |
3085 | }, | |
3086 | /* overall control: mostly access permissions */ | |
3087 | { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, | |
3088 | .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, | |
3089 | .access = PL1_RW, | |
3090 | .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), | |
3091 | .resetvalue = 0, | |
3092 | }, | |
3093 | /* per-timer control */ | |
3094 | { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, | |
3095 | .secure = ARM_CP_SECSTATE_NS, | |
3096 | .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, | |
3097 | .accessfn = gt_ptimer_access, | |
3098 | .fieldoffset = offsetoflow32(CPUARMState, | |
3099 | cp15.c14_timer[GTIMER_PHYS].ctl), | |
3100 | .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, | |
3101 | .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, | |
3102 | }, | |
3103 | { .name = "CNTP_CTL_S", | |
3104 | .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1, | |
3105 | .secure = ARM_CP_SECSTATE_S, | |
3106 | .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, | |
3107 | .accessfn = gt_ptimer_access, | |
3108 | .fieldoffset = offsetoflow32(CPUARMState, | |
3109 | cp15.c14_timer[GTIMER_SEC].ctl), | |
3110 | .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, | |
3111 | }, | |
3112 | { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64, | |
3113 | .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, | |
3114 | .type = ARM_CP_IO, .access = PL0_RW, | |
3115 | .accessfn = gt_ptimer_access, | |
3116 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), | |
3117 | .resetvalue = 0, | |
3118 | .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, | |
3119 | .writefn = gt_phys_redir_ctl_write, .raw_writefn = raw_write, | |
3120 | }, | |
3121 | { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1, | |
3122 | .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL0_RW, | |
3123 | .accessfn = gt_vtimer_access, | |
3124 | .fieldoffset = offsetoflow32(CPUARMState, | |
3125 | cp15.c14_timer[GTIMER_VIRT].ctl), | |
3126 | .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, | |
3127 | .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, | |
3128 | }, | |
3129 | { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64, | |
3130 | .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, | |
3131 | .type = ARM_CP_IO, .access = PL0_RW, | |
3132 | .accessfn = gt_vtimer_access, | |
3133 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), | |
3134 | .resetvalue = 0, | |
3135 | .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, | |
3136 | .writefn = gt_virt_redir_ctl_write, .raw_writefn = raw_write, | |
3137 | }, | |
3138 |     /* TimerValue views: a 32-bit downcounting view of the underlying state */ | |
3139 | { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, | |
3140 | .secure = ARM_CP_SECSTATE_NS, | |
3141 | .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, | |
3142 | .accessfn = gt_ptimer_access, | |
3143 | .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, | |
3144 | }, | |
3145 | { .name = "CNTP_TVAL_S", | |
3146 | .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0, | |
3147 | .secure = ARM_CP_SECSTATE_S, | |
3148 | .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, | |
3149 | .accessfn = gt_ptimer_access, | |
3150 | .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write, | |
3151 | }, | |
3152 | { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64, | |
3153 | .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0, | |
3154 | .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, | |
3155 | .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset, | |
3156 | .readfn = gt_phys_redir_tval_read, .writefn = gt_phys_redir_tval_write, | |
3157 | }, | |
3158 | { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0, | |
3159 | .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, | |
3160 | .accessfn = gt_vtimer_access, | |
3161 | .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, | |
3162 | }, | |
3163 | { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64, | |
3164 | .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0, | |
3165 | .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL0_RW, | |
3166 | .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset, | |
3167 | .readfn = gt_virt_redir_tval_read, .writefn = gt_virt_redir_tval_write, | |
3168 | }, | |
3169 | /* The counter itself */ | |
3170 | { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0, | |
3171 | .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, | |
3172 | .accessfn = gt_pct_access, | |
3173 | .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore, | |
3174 | }, | |
3175 | { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64, | |
3176 | .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1, | |
3177 | .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, | |
3178 | .accessfn = gt_pct_access, .readfn = gt_cnt_read, | |
3179 | }, | |
3180 | { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1, | |
3181 | .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO, | |
3182 | .accessfn = gt_vct_access, | |
3183 | .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore, | |
3184 | }, | |
3185 | { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, | |
3186 | .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, | |
3187 | .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, | |
3188 | .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read, | |
3189 | }, | |
3190 | /* Comparison value, indicating when the timer goes off */ | |
3191 | { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2, | |
3192 | .secure = ARM_CP_SECSTATE_NS, | |
3193 | .access = PL0_RW, | |
3194 | .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, | |
3195 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), | |
3196 | .accessfn = gt_ptimer_access, | |
3197 | .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, | |
3198 | .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, | |
3199 | }, | |
3200 | { .name = "CNTP_CVAL_S", .cp = 15, .crm = 14, .opc1 = 2, | |
3201 | .secure = ARM_CP_SECSTATE_S, | |
3202 | .access = PL0_RW, | |
3203 | .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, | |
3204 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), | |
3205 | .accessfn = gt_ptimer_access, | |
3206 | .writefn = gt_sec_cval_write, .raw_writefn = raw_write, | |
3207 | }, | |
3208 | { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64, | |
3209 | .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, | |
3210 | .access = PL0_RW, | |
3211 | .type = ARM_CP_IO, | |
3212 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), | |
3213 | .resetvalue = 0, .accessfn = gt_ptimer_access, | |
3214 | .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, | |
3215 | .writefn = gt_phys_redir_cval_write, .raw_writefn = raw_write, | |
3216 | }, | |
3217 | { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3, | |
3218 | .access = PL0_RW, | |
3219 | .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS, | |
3220 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), | |
3221 | .accessfn = gt_vtimer_access, | |
3222 | .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, | |
3223 | .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, | |
3224 | }, | |
3225 | { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64, | |
3226 | .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, | |
3227 | .access = PL0_RW, | |
3228 | .type = ARM_CP_IO, | |
3229 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), | |
3230 | .resetvalue = 0, .accessfn = gt_vtimer_access, | |
3231 | .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, | |
3232 | .writefn = gt_virt_redir_cval_write, .raw_writefn = raw_write, | |
3233 | }, | |
3234 | /* | |
3235 |      * Secure timer -- this is actually restricted to EL3 only, | |
3236 |      * and configurably to Secure EL1, via the accessfn. | |
3237 | */ | |
3238 | { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64, | |
3239 | .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0, | |
3240 | .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW, | |
3241 | .accessfn = gt_stimer_access, | |
3242 | .readfn = gt_sec_tval_read, | |
3243 | .writefn = gt_sec_tval_write, | |
3244 | .resetfn = gt_sec_timer_reset, | |
3245 | }, | |
3246 | { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64, | |
3247 | .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1, | |
3248 | .type = ARM_CP_IO, .access = PL1_RW, | |
3249 | .accessfn = gt_stimer_access, | |
3250 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl), | |
3251 | .resetvalue = 0, | |
3252 | .writefn = gt_sec_ctl_write, .raw_writefn = raw_write, | |
3253 | }, | |
3254 | { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64, | |
3255 | .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2, | |
3256 | .type = ARM_CP_IO, .access = PL1_RW, | |
3257 | .accessfn = gt_stimer_access, | |
3258 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval), | |
3259 | .writefn = gt_sec_cval_write, .raw_writefn = raw_write, | |
3260 | }, | |
3261 | }; | |
3262 | ||
3263 | static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
3264 | bool isread) | |
3265 | { | |
3266 | if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { | |
3267 | return CP_ACCESS_TRAP; | |
3268 | } | |
3269 | return CP_ACCESS_OK; | |
3270 | } | |
3271 | ||
3272 | #else | |
3273 | ||
3274 | /* | |
3275 | * In user-mode most of the generic timer registers are inaccessible | |
3276 | * however modern kernels (4.12+) allow access to cntvct_el0 | |
3277 | */ | |
3278 | ||
3279 | static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
3280 | { | |
3281 | ARMCPU *cpu = env_archcpu(env); | |
3282 | ||
3283 | /* | |
3284 |      * Currently we have no support for QEMUTimer in linux-user, so we | |
3285 |      * can't call gt_get_countervalue(env); instead we directly | |
3286 |      * call the lower-level functions. | |
3287 | */ | |
3288 | return cpu_get_clock() / gt_cntfrq_period_ns(cpu); | |
3289 | } | |
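/*
 * Rough worked example of the conversion above (added for illustration,
 * not part of the original code): with the default CNTFRQ reset value of
 * NANOSECONDS_PER_SECOND / GTIMER_SCALE used by the reginfo below, and
 * assuming GTIMER_SCALE is 16 (i.e. a 62.5 MHz timer), gt_cntfrq_period_ns()
 * is 16 ns, so a cpu_get_clock() value of 1600000000 ns (1.6 s of host
 * clock) reads back as 1600000000 / 16 = 100000000 counter ticks.
 */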
3290 | ||
3291 | static const ARMCPRegInfo generic_timer_cp_reginfo[] = { | |
3292 | { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64, | |
3293 | .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0, | |
3294 | .type = ARM_CP_CONST, .access = PL0_R /* no PL1_RW in linux-user */, | |
3295 | .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq), | |
3296 | .resetvalue = NANOSECONDS_PER_SECOND / GTIMER_SCALE, | |
3297 | }, | |
3298 | { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64, | |
3299 | .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2, | |
3300 | .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO, | |
3301 | .readfn = gt_virt_cnt_read, | |
3302 | }, | |
3303 | }; | |
3304 | ||
3305 | #endif | |
3306 | ||
3307 | static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) | |
3308 | { | |
3309 | if (arm_feature(env, ARM_FEATURE_LPAE)) { | |
3310 | raw_write(env, ri, value); | |
3311 | } else if (arm_feature(env, ARM_FEATURE_V7)) { | |
3312 | raw_write(env, ri, value & 0xfffff6ff); | |
3313 | } else { | |
3314 | raw_write(env, ri, value & 0xfffff1ff); | |
3315 | } | |
3316 | } | |
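/*
 * Illustrative consequence of the masks above (not in the original code):
 * writing 0xffffffff to PAR stores
 *   0xffffffff   on an LPAE-capable CPU (the full value is kept),
 *   0xfffff6ff   on a v7 non-LPAE CPU   (bits 11 and 8 are masked off),
 *   0xfffff1ff   on a pre-v7 CPU        (bits 11:9 are masked off).
 */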
3317 | ||
3318 | #ifndef CONFIG_USER_ONLY | |
3319 | /* get_phys_addr() isn't present for user-mode-only targets */ | |
3320 | ||
3321 | static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
3322 | bool isread) | |
3323 | { | |
3324 | if (ri->opc2 & 4) { | |
3325 | /* | |
3326 | * The ATS12NSO* operations must trap to EL3 or EL2 if executed in | |
3327 | * Secure EL1 (which can only happen if EL3 is AArch64). | |
3328 | * They are simply UNDEF if executed from NS EL1. | |
3329 | * They function normally from EL2 or EL3. | |
3330 | */ | |
3331 | if (arm_current_el(env) == 1) { | |
3332 | if (arm_is_secure_below_el3(env)) { | |
3333 | if (env->cp15.scr_el3 & SCR_EEL2) { | |
3334 | return CP_ACCESS_TRAP_EL2; | |
3335 | } | |
3336 | return CP_ACCESS_TRAP_EL3; | |
3337 | } | |
3338 | return CP_ACCESS_TRAP_UNCATEGORIZED; | |
3339 | } | |
3340 | } | |
3341 | return CP_ACCESS_OK; | |
3342 | } | |
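/*
 * Summary of the check above, derived directly from the code and added
 * only as an illustration (it applies to the ATS12NSO* encodings, i.e.
 * when opc2 bit 2 is set):
 *   executed at EL2 or EL3                     -> CP_ACCESS_OK
 *   executed at Secure EL1, SCR_EL3.EEL2 == 1  -> trap to EL2
 *   executed at Secure EL1, SCR_EL3.EEL2 == 0  -> trap to EL3
 *   executed at Non-secure EL1                 -> UNDEF (uncategorized trap)
 */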
3343 | ||
3344 | #ifdef CONFIG_TCG | |
3345 | static uint64_t do_ats_write(CPUARMState *env, uint64_t value, | |
3346 | MMUAccessType access_type, ARMMMUIdx mmu_idx, | |
3347 | bool is_secure) | |
3348 | { | |
3349 | bool ret; | |
3350 | uint64_t par64; | |
3351 | bool format64 = false; | |
3352 | ARMMMUFaultInfo fi = {}; | |
3353 | GetPhysAddrResult res = {}; | |
3354 | ||
3355 | ret = get_phys_addr_with_secure(env, value, access_type, mmu_idx, | |
3356 | is_secure, &res, &fi); | |
3357 | ||
3358 | /* | |
3359 | * ATS operations only do S1 or S1+S2 translations, so we never | |
3360 | * have to deal with the ARMCacheAttrs format for S2 only. | |
3361 | */ | |
3362 | assert(!res.cacheattrs.is_s2_format); | |
3363 | ||
3364 | if (ret) { | |
3365 | /* | |
3366 | * Some kinds of translation fault must cause exceptions rather | |
3367 | * than being reported in the PAR. | |
3368 | */ | |
3369 | int current_el = arm_current_el(env); | |
3370 | int target_el; | |
3371 | uint32_t syn, fsr, fsc; | |
3372 | bool take_exc = false; | |
3373 | ||
3374 | if (fi.s1ptw && current_el == 1 | |
3375 | && arm_mmu_idx_is_stage1_of_2(mmu_idx)) { | |
3376 | /* | |
3377 | * Synchronous stage 2 fault on an access made as part of the | |
3378 | * translation table walk for AT S1E0* or AT S1E1* insn | |
3379 | * executed from NS EL1. If this is a synchronous external abort | |
3380 | * and SCR_EL3.EA == 1, then we take a synchronous external abort | |
3381 | * to EL3. Otherwise the fault is taken as an exception to EL2, | |
3382 | * and HPFAR_EL2 holds the faulting IPA. | |
3383 | */ | |
3384 | if (fi.type == ARMFault_SyncExternalOnWalk && | |
3385 | (env->cp15.scr_el3 & SCR_EA)) { | |
3386 | target_el = 3; | |
3387 | } else { | |
3388 | env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4; | |
3389 | if (arm_is_secure_below_el3(env) && fi.s1ns) { | |
3390 | env->cp15.hpfar_el2 |= HPFAR_NS; | |
3391 | } | |
3392 | target_el = 2; | |
3393 | } | |
3394 | take_exc = true; | |
3395 | } else if (fi.type == ARMFault_SyncExternalOnWalk) { | |
3396 | /* | |
3397 | * Synchronous external aborts during a translation table walk | |
3398 | * are taken as Data Abort exceptions. | |
3399 | */ | |
3400 | if (fi.stage2) { | |
3401 | if (current_el == 3) { | |
3402 | target_el = 3; | |
3403 | } else { | |
3404 | target_el = 2; | |
3405 | } | |
3406 | } else { | |
3407 | target_el = exception_target_el(env); | |
3408 | } | |
3409 | take_exc = true; | |
3410 | } | |
3411 | ||
3412 | if (take_exc) { | |
3413 | /* Construct FSR and FSC using same logic as arm_deliver_fault() */ | |
3414 | if (target_el == 2 || arm_el_is_aa64(env, target_el) || | |
3415 | arm_s1_regime_using_lpae_format(env, mmu_idx)) { | |
3416 | fsr = arm_fi_to_lfsc(&fi); | |
3417 | fsc = extract32(fsr, 0, 6); | |
3418 | } else { | |
3419 | fsr = arm_fi_to_sfsc(&fi); | |
3420 | fsc = 0x3f; | |
3421 | } | |
3422 | /* | |
3423 | * Report exception with ESR indicating a fault due to a | |
3424 | * translation table walk for a cache maintenance instruction. | |
3425 | */ | |
3426 | syn = syn_data_abort_no_iss(current_el == target_el, 0, | |
3427 | fi.ea, 1, fi.s1ptw, 1, fsc); | |
3428 | env->exception.vaddress = value; | |
3429 | env->exception.fsr = fsr; | |
3430 | raise_exception(env, EXCP_DATA_ABORT, syn, target_el); | |
3431 | } | |
3432 | } | |
3433 | ||
3434 | if (is_a64(env)) { | |
3435 | format64 = true; | |
3436 | } else if (arm_feature(env, ARM_FEATURE_LPAE)) { | |
3437 | /* | |
3438 | * ATS1Cxx: | |
3439 | * * TTBCR.EAE determines whether the result is returned using the | |
3440 | * 32-bit or the 64-bit PAR format | |
3441 |      * * Instructions executed in Hyp mode always use the 64-bit format | |
3442 |      * | |
3443 |      * ATS1S2NSOxx uses the 64-bit format if any of the following is true: | |
3444 |      * * The Non-secure TTBCR.EAE bit is set to 1 | |
3445 |      * * The implementation includes EL2, and the value of HCR.VM is 1 | |
3446 |      * | |
3447 |      * (Note that HCR.DC makes HCR.VM behave as if it is 1.) | |
3448 |      * | |
3449 |      * ATS1Hx always uses the 64-bit format. | |
3450 | */ | |
3451 | format64 = arm_s1_regime_using_lpae_format(env, mmu_idx); | |
3452 | ||
3453 | if (arm_feature(env, ARM_FEATURE_EL2)) { | |
3454 | if (mmu_idx == ARMMMUIdx_E10_0 || | |
3455 | mmu_idx == ARMMMUIdx_E10_1 || | |
3456 | mmu_idx == ARMMMUIdx_E10_1_PAN) { | |
3457 | format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC); | |
3458 | } else { | |
3459 | format64 |= arm_current_el(env) == 2; | |
3460 | } | |
3461 | } | |
3462 | } | |
3463 | ||
3464 | if (format64) { | |
3465 | /* Create a 64-bit PAR */ | |
3466 | par64 = (1 << 11); /* LPAE bit always set */ | |
3467 | if (!ret) { | |
3468 | par64 |= res.f.phys_addr & ~0xfffULL; | |
3469 | if (!res.f.attrs.secure) { | |
3470 | par64 |= (1 << 9); /* NS */ | |
3471 | } | |
3472 | par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */ | |
3473 | par64 |= res.cacheattrs.shareability << 7; /* SH */ | |
3474 | } else { | |
3475 | uint32_t fsr = arm_fi_to_lfsc(&fi); | |
3476 | ||
3477 | par64 |= 1; /* F */ | |
3478 | par64 |= (fsr & 0x3f) << 1; /* FS */ | |
3479 | if (fi.stage2) { | |
3480 | par64 |= (1 << 9); /* S */ | |
3481 | } | |
3482 | if (fi.s1ptw) { | |
3483 | par64 |= (1 << 8); /* PTW */ | |
3484 | } | |
3485 | } | |
3486 | } else { | |
3487 | /* | |
3488 | * fsr is a DFSR/IFSR value for the short descriptor | |
3489 | * translation table format (with WnR always clear). | |
3490 | * Convert it to a 32-bit PAR. | |
3491 | */ | |
3492 | if (!ret) { | |
3493 | /* We do not set any attribute bits in the PAR */ | |
3494 | if (res.f.lg_page_size == 24 | |
3495 | && arm_feature(env, ARM_FEATURE_V7)) { | |
3496 | par64 = (res.f.phys_addr & 0xff000000) | (1 << 1); | |
3497 | } else { | |
3498 | par64 = res.f.phys_addr & 0xfffff000; | |
3499 | } | |
3500 | if (!res.f.attrs.secure) { | |
3501 | par64 |= (1 << 9); /* NS */ | |
3502 | } | |
3503 | } else { | |
3504 | uint32_t fsr = arm_fi_to_sfsc(&fi); | |
3505 | ||
3506 | par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) | | |
3507 | ((fsr & 0xf) << 1) | 1; | |
3508 | } | |
3509 | } | |
3510 | return par64; | |
3511 | } | |
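/*
 * Rough worked example of the 64-bit PAR built above (illustrative only,
 * not part of the original code): a successful translation to physical
 * address 0x80001000 with attrs.secure clear, cacheattrs.attrs == 0xff
 * and shareability == 3 yields
 *   (1 << 11)                LPAE     0x0000000000000800
 *   0x80001000 & ~0xfffULL   PA       0x0000000080001000
 *   (1 << 9)                 NS       0x0000000000000200
 *   3 << 7                   SH       0x0000000000000180
 *   0xffULL << 56            ATTR     0xff00000000000000
 *   ------------------------------------------------------
 *   par64                             0xff00000080001b80
 */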
3512 | #endif /* CONFIG_TCG */ | |
3513 | ||
3514 | static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) | |
3515 | { | |
3516 | #ifdef CONFIG_TCG | |
3517 | MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; | |
3518 | uint64_t par64; | |
3519 | ARMMMUIdx mmu_idx; | |
3520 | int el = arm_current_el(env); | |
3521 | bool secure = arm_is_secure_below_el3(env); | |
3522 | ||
3523 | switch (ri->opc2 & 6) { | |
3524 | case 0: | |
3525 | /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */ | |
3526 | switch (el) { | |
3527 | case 3: | |
3528 | mmu_idx = ARMMMUIdx_E3; | |
3529 | secure = true; | |
3530 | break; | |
3531 | case 2: | |
3532 | g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */ | |
3533 | /* fall through */ | |
3534 | case 1: | |
3535 | if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) { | |
3536 | mmu_idx = ARMMMUIdx_Stage1_E1_PAN; | |
3537 | } else { | |
3538 | mmu_idx = ARMMMUIdx_Stage1_E1; | |
3539 | } | |
3540 | break; | |
3541 | default: | |
3542 | g_assert_not_reached(); | |
3543 | } | |
3544 | break; | |
3545 | case 2: | |
3546 | /* stage 1 current state PL0: ATS1CUR, ATS1CUW */ | |
3547 | switch (el) { | |
3548 | case 3: | |
3549 | mmu_idx = ARMMMUIdx_E10_0; | |
3550 | secure = true; | |
3551 | break; | |
3552 | case 2: | |
3553 | g_assert(!secure); /* ARMv8.4-SecEL2 is 64-bit only */ | |
3554 | mmu_idx = ARMMMUIdx_Stage1_E0; | |
3555 | break; | |
3556 | case 1: | |
3557 | mmu_idx = ARMMMUIdx_Stage1_E0; | |
3558 | break; | |
3559 | default: | |
3560 | g_assert_not_reached(); | |
3561 | } | |
3562 | break; | |
3563 | case 4: | |
3564 | /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */ | |
3565 | mmu_idx = ARMMMUIdx_E10_1; | |
3566 | secure = false; | |
3567 | break; | |
3568 | case 6: | |
3569 | /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */ | |
3570 | mmu_idx = ARMMMUIdx_E10_0; | |
3571 | secure = false; | |
3572 | break; | |
3573 | default: | |
3574 | g_assert_not_reached(); | |
3575 | } | |
3576 | ||
3577 | par64 = do_ats_write(env, value, access_type, mmu_idx, secure); | |
3578 | ||
3579 | A32_BANKED_CURRENT_REG_SET(env, par, par64); | |
3580 | #else | |
3581 | /* Handled by hardware accelerator. */ | |
3582 | g_assert_not_reached(); | |
3583 | #endif /* CONFIG_TCG */ | |
3584 | } | |
3585 | ||
3586 | static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3587 | uint64_t value) | |
3588 | { | |
3589 | #ifdef CONFIG_TCG | |
3590 | MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; | |
3591 | uint64_t par64; | |
3592 | ||
3593 | /* There is no SecureEL2 for AArch32. */ | |
3594 | par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2, false); | |
3595 | ||
3596 | A32_BANKED_CURRENT_REG_SET(env, par, par64); | |
3597 | #else | |
3598 | /* Handled by hardware accelerator. */ | |
3599 | g_assert_not_reached(); | |
3600 | #endif /* CONFIG_TCG */ | |
3601 | } | |
3602 | ||
3603 | static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
3604 | bool isread) | |
3605 | { | |
3606 | if (arm_current_el(env) == 3 && | |
3607 | !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) { | |
3608 | return CP_ACCESS_TRAP; | |
3609 | } | |
3610 | return CP_ACCESS_OK; | |
3611 | } | |
3612 | ||
3613 | static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, | |
3614 | uint64_t value) | |
3615 | { | |
3616 | #ifdef CONFIG_TCG | |
3617 | MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; | |
3618 | ARMMMUIdx mmu_idx; | |
3619 | int secure = arm_is_secure_below_el3(env); | |
3620 | uint64_t hcr_el2 = arm_hcr_el2_eff(env); | |
3621 | bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE); | |
3622 | ||
3623 | switch (ri->opc2 & 6) { | |
3624 | case 0: | |
3625 | switch (ri->opc1) { | |
3626 | case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ | |
3627 | if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) { | |
3628 | mmu_idx = regime_e20 ? | |
3629 | ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN; | |
3630 | } else { | |
3631 | mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1; | |
3632 | } | |
3633 | break; | |
3634 | case 4: /* AT S1E2R, AT S1E2W */ | |
3635 | mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2; | |
3636 | break; | |
3637 | case 6: /* AT S1E3R, AT S1E3W */ | |
3638 | mmu_idx = ARMMMUIdx_E3; | |
3639 | secure = true; | |
3640 | break; | |
3641 | default: | |
3642 | g_assert_not_reached(); | |
3643 | } | |
3644 | break; | |
3645 | case 2: /* AT S1E0R, AT S1E0W */ | |
3646 | mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0; | |
3647 | break; | |
3648 | case 4: /* AT S12E1R, AT S12E1W */ | |
3649 | mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1; | |
3650 | break; | |
3651 | case 6: /* AT S12E0R, AT S12E0W */ | |
3652 | mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0; | |
3653 | break; | |
3654 | default: | |
3655 | g_assert_not_reached(); | |
3656 | } | |
3657 | ||
3658 | env->cp15.par_el[1] = do_ats_write(env, value, access_type, | |
3659 | mmu_idx, secure); | |
3660 | #else | |
3661 | /* Handled by hardware accelerator. */ | |
3662 | g_assert_not_reached(); | |
3663 | #endif /* CONFIG_TCG */ | |
3664 | } | |
3665 | #endif | |
3666 | ||
3667 | static const ARMCPRegInfo vapa_cp_reginfo[] = { | |
3668 | { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0, | |
3669 | .access = PL1_RW, .resetvalue = 0, | |
3670 | .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s), | |
3671 | offsetoflow32(CPUARMState, cp15.par_ns) }, | |
3672 | .writefn = par_write }, | |
3673 | #ifndef CONFIG_USER_ONLY | |
3674 | /* This underdecoding is safe because the reginfo is NO_RAW. */ | |
3675 | { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY, | |
3676 | .access = PL1_W, .accessfn = ats_access, | |
3677 | .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, | |
3678 | #endif | |
3679 | }; | |
3680 | ||
3681 | /* Return basic MPU access permission bits. */ | |
3682 | static uint32_t simple_mpu_ap_bits(uint32_t val) | |
3683 | { | |
3684 | uint32_t ret; | |
3685 | uint32_t mask; | |
3686 | int i; | |
3687 | ret = 0; | |
3688 | mask = 3; | |
3689 | for (i = 0; i < 16; i += 2) { | |
3690 | ret |= (val >> i) & mask; | |
3691 | mask <<= 2; | |
3692 | } | |
3693 | return ret; | |
3694 | } | |
3695 | ||
3696 | /* Pad basic MPU access permission bits to extended format. */ | |
3697 | static uint32_t extended_mpu_ap_bits(uint32_t val) | |
3698 | { | |
3699 | uint32_t ret; | |
3700 | uint32_t mask; | |
3701 | int i; | |
3702 | ret = 0; | |
3703 | mask = 3; | |
3704 | for (i = 0; i < 16; i += 2) { | |
3705 | ret |= (val & mask) << i; | |
3706 | mask <<= 2; | |
3707 | } | |
3708 | return ret; | |
3709 | } | |
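/*
 * Worked example of the two conversions above (added for illustration,
 * not part of the original code):
 *
 *   uint32_t simple = 0x6;  // region 1 AP = 0b01, region 0 AP = 0b10
 *   uint32_t ext = extended_mpu_ap_bits(simple);  // == 0x12
 *   assert(simple_mpu_ap_bits(ext) == simple);    // round-trips to 0x6
 *
 * i.e. the extended format keeps each region's AP field in a 4-bit slot,
 * of which only the low two bits are populated here.
 */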
3710 | ||
3711 | static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3712 | uint64_t value) | |
3713 | { | |
3714 | env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value); | |
3715 | } | |
3716 | ||
3717 | static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
3718 | { | |
3719 | return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap); | |
3720 | } | |
3721 | ||
3722 | static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3723 | uint64_t value) | |
3724 | { | |
3725 | env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value); | |
3726 | } | |
3727 | ||
3728 | static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
3729 | { | |
3730 | return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap); | |
3731 | } | |
3732 | ||
3733 | static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
3734 | { | |
3735 | uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); | |
3736 | ||
3737 | if (!u32p) { | |
3738 | return 0; | |
3739 | } | |
3740 | ||
3741 | u32p += env->pmsav7.rnr[M_REG_NS]; | |
3742 | return *u32p; | |
3743 | } | |
3744 | ||
3745 | static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3746 | uint64_t value) | |
3747 | { | |
3748 | ARMCPU *cpu = env_archcpu(env); | |
3749 | uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); | |
3750 | ||
3751 | if (!u32p) { | |
3752 | return; | |
3753 | } | |
3754 | ||
3755 | u32p += env->pmsav7.rnr[M_REG_NS]; | |
3756 | tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ | |
3757 | *u32p = value; | |
3758 | } | |
3759 | ||
3760 | static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3761 | uint64_t value) | |
3762 | { | |
3763 | ARMCPU *cpu = env_archcpu(env); | |
3764 | uint32_t nrgs = cpu->pmsav7_dregion; | |
3765 | ||
3766 | if (value >= nrgs) { | |
3767 | qemu_log_mask(LOG_GUEST_ERROR, | |
3768 | "PMSAv7 RGNR write >= # supported regions, %" PRIu32 | |
3769 |                       " >= %" PRIu32 "\n", (uint32_t)value, nrgs); | |
3770 | return; | |
3771 | } | |
3772 | ||
3773 | raw_write(env, ri, value); | |
3774 | } | |
3775 | ||
3776 | static void prbar_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3777 | uint64_t value) | |
3778 | { | |
3779 | ARMCPU *cpu = env_archcpu(env); | |
3780 | ||
3781 | tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ | |
3782 | env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value; | |
3783 | } | |
3784 | ||
3785 | static uint64_t prbar_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
3786 | { | |
3787 | return env->pmsav8.rbar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]]; | |
3788 | } | |
3789 | ||
3790 | static void prlar_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3791 | uint64_t value) | |
3792 | { | |
3793 | ARMCPU *cpu = env_archcpu(env); | |
3794 | ||
3795 | tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ | |
3796 | env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]] = value; | |
3797 | } | |
3798 | ||
3799 | static uint64_t prlar_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
3800 | { | |
3801 | return env->pmsav8.rlar[M_REG_NS][env->pmsav7.rnr[M_REG_NS]]; | |
3802 | } | |
3803 | ||
3804 | static void prselr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3805 | uint64_t value) | |
3806 | { | |
3807 | ARMCPU *cpu = env_archcpu(env); | |
3808 | ||
3809 | /* | |
3810 |      * Ignore writes that would select a region that is not implemented. | |
3811 | * This is architecturally UNPREDICTABLE. | |
3812 | */ | |
3813 | if (value >= cpu->pmsav7_dregion) { | |
3814 | return; | |
3815 | } | |
3816 | ||
3817 | env->pmsav7.rnr[M_REG_NS] = value; | |
3818 | } | |
3819 | ||
3820 | static void hprbar_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3821 | uint64_t value) | |
3822 | { | |
3823 | ARMCPU *cpu = env_archcpu(env); | |
3824 | ||
3825 | tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ | |
3826 | env->pmsav8.hprbar[env->pmsav8.hprselr] = value; | |
3827 | } | |
3828 | ||
3829 | static uint64_t hprbar_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
3830 | { | |
3831 | return env->pmsav8.hprbar[env->pmsav8.hprselr]; | |
3832 | } | |
3833 | ||
3834 | static void hprlar_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3835 | uint64_t value) | |
3836 | { | |
3837 | ARMCPU *cpu = env_archcpu(env); | |
3838 | ||
3839 | tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ | |
3840 | env->pmsav8.hprlar[env->pmsav8.hprselr] = value; | |
3841 | } | |
3842 | ||
3843 | static uint64_t hprlar_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
3844 | { | |
3845 | return env->pmsav8.hprlar[env->pmsav8.hprselr]; | |
3846 | } | |
3847 | ||
3848 | static void hprenr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3849 | uint64_t value) | |
3850 | { | |
3851 | uint32_t n; | |
3852 | uint32_t bit; | |
3853 | ARMCPU *cpu = env_archcpu(env); | |
3854 | ||
3855 | /* Ignore writes to unimplemented regions */ | |
3856 | int rmax = MIN(cpu->pmsav8r_hdregion, 32); | |
3857 | value &= MAKE_64BIT_MASK(0, rmax); | |
3858 | ||
3859 | tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ | |
3860 | ||
3861 |     /* The register alias is only valid for the first 32 indexes */ | |
3862 | for (n = 0; n < rmax; ++n) { | |
3863 | bit = extract32(value, n, 1); | |
3864 | env->pmsav8.hprlar[n] = deposit32( | |
3865 | env->pmsav8.hprlar[n], 0, 1, bit); | |
3866 | } | |
3867 | } | |
3868 | ||
3869 | static uint64_t hprenr_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
3870 | { | |
3871 | uint32_t n; | |
3872 | uint32_t result = 0x0; | |
3873 | ARMCPU *cpu = env_archcpu(env); | |
3874 | ||
3875 |     /* The register alias is only valid for the first 32 indexes */ | |
3876 | for (n = 0; n < MIN(cpu->pmsav8r_hdregion, 32); ++n) { | |
3877 | if (env->pmsav8.hprlar[n] & 0x1) { | |
3878 | result |= (0x1 << n); | |
3879 | } | |
3880 | } | |
3881 | return result; | |
3882 | } | |
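/*
 * Illustrative example (not part of the original code): with four hyp
 * MPU regions implemented, writing 0x5 to HPRENR sets bit 0 (the enable
 * bit) of HPRLAR0 and HPRLAR2 and clears it in HPRLAR1 and HPRLAR3; a
 * subsequent HPRENR read then returns 0x5. Bits for unimplemented
 * regions are masked off by the MAKE_64BIT_MASK(0, rmax) above.
 */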
3883 | ||
3884 | static void hprselr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3885 | uint64_t value) | |
3886 | { | |
3887 | ARMCPU *cpu = env_archcpu(env); | |
3888 | ||
3889 | /* | |
3890 |      * Ignore writes that would select a region that is not implemented. | |
3891 | * This is architecturally UNPREDICTABLE. | |
3892 | */ | |
3893 | if (value >= cpu->pmsav8r_hdregion) { | |
3894 | return; | |
3895 | } | |
3896 | ||
3897 | env->pmsav8.hprselr = value; | |
3898 | } | |
3899 | ||
3900 | static void pmsav8r_regn_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
3901 | uint64_t value) | |
3902 | { | |
3903 | ARMCPU *cpu = env_archcpu(env); | |
3904 | uint8_t index = (extract32(ri->opc0, 0, 1) << 4) | | |
3905 | (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1); | |
3906 | ||
3907 | tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */ | |
3908 | ||
3909 | if (ri->opc1 & 4) { | |
3910 | if (index >= cpu->pmsav8r_hdregion) { | |
3911 | return; | |
3912 | } | |
3913 | if (ri->opc2 & 0x1) { | |
3914 | env->pmsav8.hprlar[index] = value; | |
3915 | } else { | |
3916 | env->pmsav8.hprbar[index] = value; | |
3917 | } | |
3918 | } else { | |
3919 | if (index >= cpu->pmsav7_dregion) { | |
3920 | return; | |
3921 | } | |
3922 | if (ri->opc2 & 0x1) { | |
3923 | env->pmsav8.rlar[M_REG_NS][index] = value; | |
3924 | } else { | |
3925 | env->pmsav8.rbar[M_REG_NS][index] = value; | |
3926 | } | |
3927 | } | |
3928 | } | |
3929 | ||
3930 | static uint64_t pmsav8r_regn_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
3931 | { | |
3932 | ARMCPU *cpu = env_archcpu(env); | |
3933 | uint8_t index = (extract32(ri->opc0, 0, 1) << 4) | | |
3934 | (extract32(ri->crm, 0, 3) << 1) | extract32(ri->opc2, 2, 1); | |
3935 | ||
3936 | if (ri->opc1 & 4) { | |
3937 | if (index >= cpu->pmsav8r_hdregion) { | |
3938 | return 0x0; | |
3939 | } | |
3940 | if (ri->opc2 & 0x1) { | |
3941 | return env->pmsav8.hprlar[index]; | |
3942 | } else { | |
3943 | return env->pmsav8.hprbar[index]; | |
3944 | } | |
3945 | } else { | |
3946 | if (index >= cpu->pmsav7_dregion) { | |
3947 | return 0x0; | |
3948 | } | |
3949 | if (ri->opc2 & 0x1) { | |
3950 | return env->pmsav8.rlar[M_REG_NS][index]; | |
3951 | } else { | |
3952 | return env->pmsav8.rbar[M_REG_NS][index]; | |
3953 | } | |
3954 | } | |
3955 | } | |
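/*
 * Purely arithmetic illustration of the index decode used by the two
 * functions above (the actual register encodings are defined where these
 * cpregs are registered): an index of 5 (0b00101) corresponds to
 * opc0 bit 0 == 0, crm[2:0] == 0b010 and opc2 bit 2 == 1, since
 * (0 << 4) | (0b010 << 1) | 1 == 5.
 */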
3956 | ||
3957 | static const ARMCPRegInfo pmsav8r_cp_reginfo[] = { | |
3958 | { .name = "PRBAR", | |
3959 | .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 0, | |
3960 | .access = PL1_RW, .type = ARM_CP_NO_RAW, | |
3961 | .accessfn = access_tvm_trvm, | |
3962 | .readfn = prbar_read, .writefn = prbar_write }, | |
3963 | { .name = "PRLAR", | |
3964 | .cp = 15, .opc1 = 0, .crn = 6, .crm = 3, .opc2 = 1, | |
3965 | .access = PL1_RW, .type = ARM_CP_NO_RAW, | |
3966 | .accessfn = access_tvm_trvm, | |
3967 | .readfn = prlar_read, .writefn = prlar_write }, | |
3968 | { .name = "PRSELR", .resetvalue = 0, | |
3969 | .cp = 15, .opc1 = 0, .crn = 6, .crm = 2, .opc2 = 1, | |
3970 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
3971 | .writefn = prselr_write, | |
3972 | .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]) }, | |
3973 | { .name = "HPRBAR", .resetvalue = 0, | |
3974 | .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 0, | |
3975 | .access = PL2_RW, .type = ARM_CP_NO_RAW, | |
3976 | .readfn = hprbar_read, .writefn = hprbar_write }, | |
3977 | { .name = "HPRLAR", | |
3978 | .cp = 15, .opc1 = 4, .crn = 6, .crm = 3, .opc2 = 1, | |
3979 | .access = PL2_RW, .type = ARM_CP_NO_RAW, | |
3980 | .readfn = hprlar_read, .writefn = hprlar_write }, | |
3981 | { .name = "HPRSELR", .resetvalue = 0, | |
3982 | .cp = 15, .opc1 = 4, .crn = 6, .crm = 2, .opc2 = 1, | |
3983 | .access = PL2_RW, | |
3984 | .writefn = hprselr_write, | |
3985 | .fieldoffset = offsetof(CPUARMState, pmsav8.hprselr) }, | |
3986 | { .name = "HPRENR", | |
3987 | .cp = 15, .opc1 = 4, .crn = 6, .crm = 1, .opc2 = 1, | |
3988 | .access = PL2_RW, .type = ARM_CP_NO_RAW, | |
3989 | .readfn = hprenr_read, .writefn = hprenr_write }, | |
3990 | }; | |
3991 | ||
3992 | static const ARMCPRegInfo pmsav7_cp_reginfo[] = { | |
3993 | /* | |
3994 | * Reset for all these registers is handled in arm_cpu_reset(), | |
3995 | * because the PMSAv7 is also used by M-profile CPUs, which do | |
3996 | * not register cpregs but still need the state to be reset. | |
3997 | */ | |
3998 | { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0, | |
3999 | .access = PL1_RW, .type = ARM_CP_NO_RAW, | |
4000 | .fieldoffset = offsetof(CPUARMState, pmsav7.drbar), | |
4001 | .readfn = pmsav7_read, .writefn = pmsav7_write, | |
4002 | .resetfn = arm_cp_reset_ignore }, | |
4003 | { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2, | |
4004 | .access = PL1_RW, .type = ARM_CP_NO_RAW, | |
4005 | .fieldoffset = offsetof(CPUARMState, pmsav7.drsr), | |
4006 | .readfn = pmsav7_read, .writefn = pmsav7_write, | |
4007 | .resetfn = arm_cp_reset_ignore }, | |
4008 | { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4, | |
4009 | .access = PL1_RW, .type = ARM_CP_NO_RAW, | |
4010 | .fieldoffset = offsetof(CPUARMState, pmsav7.dracr), | |
4011 | .readfn = pmsav7_read, .writefn = pmsav7_write, | |
4012 | .resetfn = arm_cp_reset_ignore }, | |
4013 | { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0, | |
4014 | .access = PL1_RW, | |
4015 | .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]), | |
4016 | .writefn = pmsav7_rgnr_write, | |
4017 | .resetfn = arm_cp_reset_ignore }, | |
4018 | }; | |
4019 | ||
4020 | static const ARMCPRegInfo pmsav5_cp_reginfo[] = { | |
4021 | { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, | |
4022 | .access = PL1_RW, .type = ARM_CP_ALIAS, | |
4023 | .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), | |
4024 | .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, }, | |
4025 | { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, | |
4026 | .access = PL1_RW, .type = ARM_CP_ALIAS, | |
4027 | .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), | |
4028 | .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, }, | |
4029 | { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2, | |
4030 | .access = PL1_RW, | |
4031 | .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap), | |
4032 | .resetvalue = 0, }, | |
4033 | { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3, | |
4034 | .access = PL1_RW, | |
4035 | .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap), | |
4036 | .resetvalue = 0, }, | |
4037 | { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0, | |
4038 | .access = PL1_RW, | |
4039 | .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, }, | |
4040 | { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1, | |
4041 | .access = PL1_RW, | |
4042 | .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, }, | |
4043 | /* Protection region base and size registers */ | |
4044 | { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, | |
4045 | .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, | |
4046 | .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) }, | |
4047 | { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0, | |
4048 | .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, | |
4049 | .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) }, | |
4050 | { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0, | |
4051 | .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, | |
4052 | .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) }, | |
4053 | { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0, | |
4054 | .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, | |
4055 | .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) }, | |
4056 | { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0, | |
4057 | .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, | |
4058 | .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) }, | |
4059 | { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0, | |
4060 | .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, | |
4061 | .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) }, | |
4062 | { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0, | |
4063 | .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, | |
4064 | .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) }, | |
4065 | { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0, | |
4066 | .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0, | |
4067 | .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) }, | |
4068 | }; | |
4069 | ||
4070 | static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4071 | uint64_t value) | |
4072 | { | |
4073 | ARMCPU *cpu = env_archcpu(env); | |
4074 | ||
4075 | if (!arm_feature(env, ARM_FEATURE_V8)) { | |
4076 | if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) { | |
4077 | /* | |
4078 | * Pre ARMv8 bits [21:19], [15:14] and [6:3] are UNK/SBZP when | |
4079 | * using Long-descriptor translation table format | |
4080 | */ | |
4081 | value &= ~((7 << 19) | (3 << 14) | (0xf << 3)); | |
4082 | } else if (arm_feature(env, ARM_FEATURE_EL3)) { | |
4083 | /* | |
4084 | * In an implementation that includes the Security Extensions | |
4085 | * TTBCR has additional fields PD0 [4] and PD1 [5] for | |
4086 | * Short-descriptor translation table format. | |
4087 | */ | |
4088 | value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N; | |
4089 | } else { | |
4090 | value &= TTBCR_N; | |
4091 | } | |
4092 | } | |
4093 | ||
4094 | if (arm_feature(env, ARM_FEATURE_LPAE)) { | |
4095 | /* | |
4096 | * With LPAE the TTBCR could result in a change of ASID | |
4097 | * via the TTBCR.A1 bit, so do a TLB flush. | |
4098 | */ | |
4099 | tlb_flush(CPU(cpu)); | |
4100 | } | |
4101 | raw_write(env, ri, value); | |
4102 | } | |
4103 | ||
4104 | static void vmsa_tcr_el12_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4105 | uint64_t value) | |
4106 | { | |
4107 | ARMCPU *cpu = env_archcpu(env); | |
4108 | ||
4109 | /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ | |
4110 | tlb_flush(CPU(cpu)); | |
4111 | raw_write(env, ri, value); | |
4112 | } | |
4113 | ||
4114 | static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4115 | uint64_t value) | |
4116 | { | |
4117 | /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ | |
4118 | if (cpreg_field_is_64bit(ri) && | |
4119 | extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { | |
4120 | ARMCPU *cpu = env_archcpu(env); | |
4121 | tlb_flush(CPU(cpu)); | |
4122 | } | |
4123 | raw_write(env, ri, value); | |
4124 | } | |
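/*
 * Sketch of the check above (illustrative only): the ASID field occupies
 * bits [63:48] of a 64-bit TTBR, so extract64(old ^ new, 48, 16) is
 * non-zero exactly when that field changes. E.g. going from
 * 0x0001000000001000 to 0x0002000000001000 flips bits within [63:48]
 * and triggers a flush, while changing only the translation table base
 * address does not.
 */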
4125 | ||
4126 | static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4127 | uint64_t value) | |
4128 | { | |
4129 | /* | |
4130 | * If we are running with E2&0 regime, then an ASID is active. | |
4131 | * Flush if that might be changing. Note we're not checking | |
4132 | * TCR_EL2.A1 to know if this is really the TTBRx_EL2 that | |
4133 | * holds the active ASID, only checking the field that might. | |
4134 | */ | |
4135 | if (extract64(raw_read(env, ri) ^ value, 48, 16) && | |
4136 | (arm_hcr_el2_eff(env) & HCR_E2H)) { | |
4137 | uint16_t mask = ARMMMUIdxBit_E20_2 | | |
4138 | ARMMMUIdxBit_E20_2_PAN | | |
4139 | ARMMMUIdxBit_E20_0; | |
4140 | tlb_flush_by_mmuidx(env_cpu(env), mask); | |
4141 | } | |
4142 | raw_write(env, ri, value); | |
4143 | } | |
4144 | ||
4145 | static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4146 | uint64_t value) | |
4147 | { | |
4148 | ARMCPU *cpu = env_archcpu(env); | |
4149 | CPUState *cs = CPU(cpu); | |
4150 | ||
4151 | /* | |
4152 |      * A change in the VMID used by the stage 2 page tables invalidates | |
4153 |      * the stage 2 and combined stage 1&2 TLBs (EL10_1 and EL10_0). | |
4154 | */ | |
4155 | if (extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { | |
4156 | tlb_flush_by_mmuidx(cs, alle1_tlbmask(env)); | |
4157 | } | |
4158 | raw_write(env, ri, value); | |
4159 | } | |
4160 | ||
4161 | static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { | |
4162 | { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0, | |
4163 | .access = PL1_RW, .accessfn = access_tvm_trvm, .type = ARM_CP_ALIAS, | |
4164 | .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s), | |
4165 | offsetoflow32(CPUARMState, cp15.dfsr_ns) }, }, | |
4166 | { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1, | |
4167 | .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, | |
4168 | .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s), | |
4169 | offsetoflow32(CPUARMState, cp15.ifsr_ns) } }, | |
4170 | { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0, | |
4171 | .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, | |
4172 | .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s), | |
4173 | offsetof(CPUARMState, cp15.dfar_ns) } }, | |
4174 | { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64, | |
4175 | .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, | |
4176 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
4177 | .fgt = FGT_FAR_EL1, | |
4178 | .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), | |
4179 | .resetvalue = 0, }, | |
4180 | }; | |
4181 | ||
4182 | static const ARMCPRegInfo vmsa_cp_reginfo[] = { | |
4183 | { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64, | |
4184 | .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, | |
4185 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
4186 | .fgt = FGT_ESR_EL1, | |
4187 | .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, | |
4188 | { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, | |
4189 | .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, | |
4190 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
4191 | .fgt = FGT_TTBR0_EL1, | |
4192 | .writefn = vmsa_ttbr_write, .resetvalue = 0, | |
4193 | .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), | |
4194 | offsetof(CPUARMState, cp15.ttbr0_ns) } }, | |
4195 | { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH, | |
4196 | .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, | |
4197 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
4198 | .fgt = FGT_TTBR1_EL1, | |
4199 | .writefn = vmsa_ttbr_write, .resetvalue = 0, | |
4200 | .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), | |
4201 | offsetof(CPUARMState, cp15.ttbr1_ns) } }, | |
4202 | { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64, | |
4203 | .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, | |
4204 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
4205 | .fgt = FGT_TCR_EL1, | |
4206 | .writefn = vmsa_tcr_el12_write, | |
4207 | .raw_writefn = raw_write, | |
4208 | .resetvalue = 0, | |
4209 | .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) }, | |
4210 | { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, | |
4211 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
4212 | .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write, | |
4213 | .raw_writefn = raw_write, | |
4214 | .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]), | |
4215 | offsetoflow32(CPUARMState, cp15.tcr_el[1])} }, | |
4216 | }; | |
4217 | ||
4218 | /* | |
4219 | * Note that unlike TTBCR, writing to TTBCR2 does not require flushing | |
4220 |  * the QEMU TLBs or adjusting cached masks. | |
4221 | */ | |
4222 | static const ARMCPRegInfo ttbcr2_reginfo = { | |
4223 | .name = "TTBCR2", .cp = 15, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 3, | |
4224 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
4225 | .type = ARM_CP_ALIAS, | |
4226 | .bank_fieldoffsets = { | |
4227 | offsetofhigh32(CPUARMState, cp15.tcr_el[3]), | |
4228 | offsetofhigh32(CPUARMState, cp15.tcr_el[1]), | |
4229 | }, | |
4230 | }; | |
4231 | ||
4232 | static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4233 | uint64_t value) | |
4234 | { | |
4235 | env->cp15.c15_ticonfig = value & 0xe7; | |
4236 | /* The OS_TYPE bit in this register changes the reported CPUID! */ | |
4237 | env->cp15.c0_cpuid = (value & (1 << 5)) ? | |
4238 | ARM_CPUID_TI915T : ARM_CPUID_TI925T; | |
4239 | } | |
4240 | ||
4241 | static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4242 | uint64_t value) | |
4243 | { | |
4244 | env->cp15.c15_threadid = value & 0xffff; | |
4245 | } | |
4246 | ||
4247 | static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4248 | uint64_t value) | |
4249 | { | |
4250 | /* Wait-for-interrupt (deprecated) */ | |
4251 | cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); | |
4252 | } | |
4253 | ||
4254 | static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4255 | uint64_t value) | |
4256 | { | |
4257 | /* | |
4258 | * On OMAP there are registers indicating the max/min index of dcache lines | |
4259 |      * containing dirty data; cache flush operations have to reset these. | |
4260 | */ | |
4261 | env->cp15.c15_i_max = 0x000; | |
4262 | env->cp15.c15_i_min = 0xff0; | |
4263 | } | |
4264 | ||
4265 | static const ARMCPRegInfo omap_cp_reginfo[] = { | |
4266 | { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY, | |
4267 | .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE, | |
4268 | .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]), | |
4269 | .resetvalue = 0, }, | |
4270 | { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0, | |
4271 | .access = PL1_RW, .type = ARM_CP_NOP }, | |
4272 | { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, | |
4273 | .access = PL1_RW, | |
4274 | .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0, | |
4275 | .writefn = omap_ticonfig_write }, | |
4276 | { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0, | |
4277 | .access = PL1_RW, | |
4278 | .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, }, | |
4279 | { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0, | |
4280 | .access = PL1_RW, .resetvalue = 0xff0, | |
4281 | .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) }, | |
4282 | { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0, | |
4283 | .access = PL1_RW, | |
4284 | .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0, | |
4285 | .writefn = omap_threadid_write }, | |
4286 | { .name = "TI925T_STATUS", .cp = 15, .crn = 15, | |
4287 | .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW, | |
4288 | .type = ARM_CP_NO_RAW, | |
4289 | .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, }, | |
4290 | /* | |
4291 | * TODO: Peripheral port remap register: | |
4292 | * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller | |
4293 | * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff), | |
4294 | * when MMU is off. | |
4295 | */ | |
4296 | { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY, | |
4297 | .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W, | |
4298 | .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW, | |
4299 | .writefn = omap_cachemaint_write }, | |
4300 | { .name = "C9", .cp = 15, .crn = 9, | |
4301 | .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, | |
4302 | .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, | |
4303 | }; | |
4304 | ||
4305 | static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4306 | uint64_t value) | |
4307 | { | |
4308 | env->cp15.c15_cpar = value & 0x3fff; | |
4309 | } | |
4310 | ||
4311 | static const ARMCPRegInfo xscale_cp_reginfo[] = { | |
4312 | { .name = "XSCALE_CPAR", | |
4313 | .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, | |
4314 | .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, | |
4315 | .writefn = xscale_cpar_write, }, | |
4316 | { .name = "XSCALE_AUXCR", | |
4317 | .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, | |
4318 | .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), | |
4319 | .resetvalue = 0, }, | |
4320 | /* | |
4321 | * XScale specific cache-lockdown: since we have no cache we NOP these | |
4322 | * and hope the guest does not really rely on cache behaviour. | |
4323 | */ | |
4324 | { .name = "XSCALE_LOCK_ICACHE_LINE", | |
4325 | .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, | |
4326 | .access = PL1_W, .type = ARM_CP_NOP }, | |
4327 | { .name = "XSCALE_UNLOCK_ICACHE", | |
4328 | .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, | |
4329 | .access = PL1_W, .type = ARM_CP_NOP }, | |
4330 | { .name = "XSCALE_DCACHE_LOCK", | |
4331 | .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, | |
4332 | .access = PL1_RW, .type = ARM_CP_NOP }, | |
4333 | { .name = "XSCALE_UNLOCK_DCACHE", | |
4334 | .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, | |
4335 | .access = PL1_W, .type = ARM_CP_NOP }, | |
4336 | }; | |
4337 | ||
4338 | static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { | |
4339 | /* | |
4340 | * RAZ/WI the whole crn=15 space, when we don't have a more specific | |
4341 | * implementation of this implementation-defined space. | |
4342 | * Ideally this should eventually disappear in favour of actually | |
4343 | * implementing the correct behaviour for all cores. | |
4344 | */ | |
4345 | { .name = "C15_IMPDEF", .cp = 15, .crn = 15, | |
4346 | .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, | |
4347 | .access = PL1_RW, | |
4348 | .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE, | |
4349 | .resetvalue = 0 }, | |
4350 | }; | |
4351 | ||
4352 | static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = { | |
4353 | /* Cache status: RAZ because we have no cache so it's always clean */ | |
4354 | { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6, | |
4355 | .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, | |
4356 | .resetvalue = 0 }, | |
4357 | }; | |
4358 | ||
4359 | static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = { | |
4360 | /* We never have a block transfer operation in progress */ | |
4361 | { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4, | |
4362 | .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, | |
4363 | .resetvalue = 0 }, | |
4364 | /* The cache ops themselves: these all NOP for QEMU */ | |
4365 | { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0, | |
4366 | .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, | |
4367 | { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0, | |
4368 | .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, | |
4369 | { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0, | |
4370 | .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, | |
4371 | { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1, | |
4372 | .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, | |
4373 | { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2, | |
4374 | .access = PL0_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, | |
4375 | { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0, | |
4376 | .access = PL1_W, .type = ARM_CP_NOP | ARM_CP_64BIT }, | |
4377 | }; | |
4378 | ||
4379 | static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = { | |
4380 | /* | |
4381 | * The cache test-and-clean instructions always return (1 << 30) | |
4382 | * to indicate that there are no dirty cache lines. | |
4383 | */ | |
4384 | { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3, | |
4385 | .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, | |
4386 | .resetvalue = (1 << 30) }, | |
4387 | { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3, | |
4388 | .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW, | |
4389 | .resetvalue = (1 << 30) }, | |
4390 | }; | |
4391 | ||
4392 | static const ARMCPRegInfo strongarm_cp_reginfo[] = { | |
4393 | /* Ignore ReadBuffer accesses */ | |
4394 | { .name = "C9_READBUFFER", .cp = 15, .crn = 9, | |
4395 | .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, | |
4396 | .access = PL1_RW, .resetvalue = 0, | |
4397 | .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW }, | |
4398 | }; | |
4399 | ||
4400 | static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
4401 | { | |
4402 | unsigned int cur_el = arm_current_el(env); | |
4403 | ||
4404 | if (arm_is_el2_enabled(env) && cur_el == 1) { | |
4405 | return env->cp15.vpidr_el2; | |
4406 | } | |
4407 | return raw_read(env, ri); | |
4408 | } | |
4409 | ||
4410 | static uint64_t mpidr_read_val(CPUARMState *env) | |
4411 | { | |
4412 | ARMCPU *cpu = env_archcpu(env); | |
4413 | uint64_t mpidr = cpu->mp_affinity; | |
4414 | ||
4415 | if (arm_feature(env, ARM_FEATURE_V7MP)) { | |
4416 | mpidr |= (1U << 31); | |
4417 | /* | |
4418 | * Cores which are uniprocessor (non-coherent) | |
4419 | * but still implement the MP extensions set | |
4420 | * bit 30. (For instance, Cortex-R5). | |
4421 | */ | |
4422 | if (cpu->mp_is_up) { | |
4423 | mpidr |= (1U << 30); | |
4424 | } | |
4425 | } | |
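    /*
     * For example, a uniprocessor Cortex-R5 style core (mp_is_up set)
     * with affinity 0 reads back 0xc0000000 here: bit 31 (MP
     * extensions implemented) plus the U bit (30).
     */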
4426 | return mpidr; | |
4427 | } | |
4428 | ||
4429 | static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
4430 | { | |
4431 | unsigned int cur_el = arm_current_el(env); | |
4432 | ||
4433 | if (arm_is_el2_enabled(env) && cur_el == 1) { | |
4434 | return env->cp15.vmpidr_el2; | |
4435 | } | |
4436 | return mpidr_read_val(env); | |
4437 | } | |
4438 | ||
4439 | static const ARMCPRegInfo lpae_cp_reginfo[] = { | |
4440 | /* NOP AMAIR0/1 */ | |
4441 | { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, | |
4442 | .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, | |
4443 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
4444 | .fgt = FGT_AMAIR_EL1, | |
4445 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
4446 | /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ | |
4447 | { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, | |
4448 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
4449 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
4450 | { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0, | |
4451 | .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0, | |
4452 | .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s), | |
4453 | offsetof(CPUARMState, cp15.par_ns)} }, | |
4454 | { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0, | |
4455 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
4456 | .type = ARM_CP_64BIT | ARM_CP_ALIAS, | |
4457 | .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), | |
4458 | offsetof(CPUARMState, cp15.ttbr0_ns) }, | |
4459 | .writefn = vmsa_ttbr_write, }, | |
4460 | { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1, | |
4461 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
4462 | .type = ARM_CP_64BIT | ARM_CP_ALIAS, | |
4463 | .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), | |
4464 | offsetof(CPUARMState, cp15.ttbr1_ns) }, | |
4465 | .writefn = vmsa_ttbr_write, }, | |
4466 | }; | |
4467 | ||
4468 | static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
4469 | { | |
4470 | return vfp_get_fpcr(env); | |
4471 | } | |
4472 | ||
4473 | static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4474 | uint64_t value) | |
4475 | { | |
4476 | vfp_set_fpcr(env, value); | |
4477 | } | |
4478 | ||
4479 | static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
4480 | { | |
4481 | return vfp_get_fpsr(env); | |
4482 | } | |
4483 | ||
4484 | static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4485 | uint64_t value) | |
4486 | { | |
4487 | vfp_set_fpsr(env, value); | |
4488 | } | |
4489 | ||
4490 | static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
4491 | bool isread) | |
4492 | { | |
4493 | if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) { | |
4494 | return CP_ACCESS_TRAP; | |
4495 | } | |
4496 | return CP_ACCESS_OK; | |
4497 | } | |
4498 | ||
4499 | static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4500 | uint64_t value) | |
4501 | { | |
4502 | env->daif = value & PSTATE_DAIF; | |
4503 | } | |
4504 | ||
4505 | static uint64_t aa64_pan_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
4506 | { | |
4507 | return env->pstate & PSTATE_PAN; | |
4508 | } | |
4509 | ||
4510 | static void aa64_pan_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4511 | uint64_t value) | |
4512 | { | |
4513 | env->pstate = (env->pstate & ~PSTATE_PAN) | (value & PSTATE_PAN); | |
4514 | } | |
4515 | ||
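/*
 * PAN, UAO, DIT and SSBS below are thin MRS/MSR accessors for
 * individual PSTATE bits; there is no separate backing field to
 * migrate, hence ARM_CP_NO_RAW and no fieldoffset.
 */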
4516 | static const ARMCPRegInfo pan_reginfo = { | |
4517 | .name = "PAN", .state = ARM_CP_STATE_AA64, | |
4518 | .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 3, | |
4519 | .type = ARM_CP_NO_RAW, .access = PL1_RW, | |
4520 | .readfn = aa64_pan_read, .writefn = aa64_pan_write | |
4521 | }; | |
4522 | ||
4523 | static uint64_t aa64_uao_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
4524 | { | |
4525 | return env->pstate & PSTATE_UAO; | |
4526 | } | |
4527 | ||
4528 | static void aa64_uao_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4529 | uint64_t value) | |
4530 | { | |
4531 | env->pstate = (env->pstate & ~PSTATE_UAO) | (value & PSTATE_UAO); | |
4532 | } | |
4533 | ||
4534 | static const ARMCPRegInfo uao_reginfo = { | |
4535 | .name = "UAO", .state = ARM_CP_STATE_AA64, | |
4536 | .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 4, | |
4537 | .type = ARM_CP_NO_RAW, .access = PL1_RW, | |
4538 | .readfn = aa64_uao_read, .writefn = aa64_uao_write | |
4539 | }; | |
4540 | ||
4541 | static uint64_t aa64_dit_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
4542 | { | |
4543 | return env->pstate & PSTATE_DIT; | |
4544 | } | |
4545 | ||
4546 | static void aa64_dit_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4547 | uint64_t value) | |
4548 | { | |
4549 | env->pstate = (env->pstate & ~PSTATE_DIT) | (value & PSTATE_DIT); | |
4550 | } | |
4551 | ||
4552 | static const ARMCPRegInfo dit_reginfo = { | |
4553 | .name = "DIT", .state = ARM_CP_STATE_AA64, | |
4554 | .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 5, | |
4555 | .type = ARM_CP_NO_RAW, .access = PL0_RW, | |
4556 | .readfn = aa64_dit_read, .writefn = aa64_dit_write | |
4557 | }; | |
4558 | ||
4559 | static uint64_t aa64_ssbs_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
4560 | { | |
4561 | return env->pstate & PSTATE_SSBS; | |
4562 | } | |
4563 | ||
4564 | static void aa64_ssbs_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4565 | uint64_t value) | |
4566 | { | |
4567 | env->pstate = (env->pstate & ~PSTATE_SSBS) | (value & PSTATE_SSBS); | |
4568 | } | |
4569 | ||
4570 | static const ARMCPRegInfo ssbs_reginfo = { | |
4571 | .name = "SSBS", .state = ARM_CP_STATE_AA64, | |
4572 | .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 6, | |
4573 | .type = ARM_CP_NO_RAW, .access = PL0_RW, | |
4574 | .readfn = aa64_ssbs_read, .writefn = aa64_ssbs_write | |
4575 | }; | |
4576 | ||
4577 | static CPAccessResult aa64_cacheop_poc_access(CPUARMState *env, | |
4578 | const ARMCPRegInfo *ri, | |
4579 | bool isread) | |
4580 | { | |
4581 | /* Cache invalidate/clean to Point of Coherency or Persistence... */ | |
4582 | switch (arm_current_el(env)) { | |
4583 | case 0: | |
4584 | /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ | |
4585 | if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { | |
4586 | return CP_ACCESS_TRAP; | |
4587 | } | |
4588 | /* fall through */ | |
4589 | case 1: | |
4590 | /* ... EL1 must trap to EL2 if HCR_EL2.TPCP is set. */ | |
4591 | if (arm_hcr_el2_eff(env) & HCR_TPCP) { | |
4592 | return CP_ACCESS_TRAP_EL2; | |
4593 | } | |
4594 | break; | |
4595 | } | |
4596 | return CP_ACCESS_OK; | |
4597 | } | |
4598 | ||
4599 | static CPAccessResult do_cacheop_pou_access(CPUARMState *env, uint64_t hcrflags) | |
4600 | { | |
4601 | /* Cache invalidate/clean to Point of Unification... */ | |
4602 | switch (arm_current_el(env)) { | |
4603 | case 0: | |
4604 | /* ... EL0 must UNDEF unless SCTLR_EL1.UCI is set. */ | |
4605 | if (!(arm_sctlr(env, 0) & SCTLR_UCI)) { | |
4606 | return CP_ACCESS_TRAP; | |
4607 | } | |
4608 | /* fall through */ | |
4609 | case 1: | |
4610 | /* ... EL1 must trap to EL2 if relevant HCR_EL2 flags are set. */ | |
4611 | if (arm_hcr_el2_eff(env) & hcrflags) { | |
4612 | return CP_ACCESS_TRAP_EL2; | |
4613 | } | |
4614 | break; | |
4615 | } | |
4616 | return CP_ACCESS_OK; | |
4617 | } | |
4618 | ||
4619 | static CPAccessResult access_ticab(CPUARMState *env, const ARMCPRegInfo *ri, | |
4620 | bool isread) | |
4621 | { | |
4622 | return do_cacheop_pou_access(env, HCR_TICAB | HCR_TPU); | |
4623 | } | |
4624 | ||
4625 | static CPAccessResult access_tocu(CPUARMState *env, const ARMCPRegInfo *ri, | |
4626 | bool isread) | |
4627 | { | |
4628 | return do_cacheop_pou_access(env, HCR_TOCU | HCR_TPU); | |
4629 | } | |
4630 | ||
4631 | /* | |
4632 | * See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions | |
4633 | * Page D4-1736 (DDI0487A.b) | |
4634 | */ | |
4635 | ||
4636 | static int vae1_tlbmask(CPUARMState *env) | |
4637 | { | |
4638 | uint64_t hcr = arm_hcr_el2_eff(env); | |
4639 | uint16_t mask; | |
4640 | ||
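    /*
     * With HCR_EL2.{E2H,TGE} == {1,1} the EL1&0 TLB maintenance
     * operations apply to the EL2&0 ("host") translation regime, so we
     * flush the E20_* mmu indexes; otherwise they apply to the usual
     * EL1&0 regime (E10_*).
     */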
4641 | if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { | |
4642 | mask = ARMMMUIdxBit_E20_2 | | |
4643 | ARMMMUIdxBit_E20_2_PAN | | |
4644 | ARMMMUIdxBit_E20_0; | |
4645 | } else { | |
4646 | mask = ARMMMUIdxBit_E10_1 | | |
4647 | ARMMMUIdxBit_E10_1_PAN | | |
4648 | ARMMMUIdxBit_E10_0; | |
4649 | } | |
4650 | return mask; | |
4651 | } | |
4652 | ||
4653 | /* Return 56 if TBI is enabled, 64 otherwise. */ | |
4654 | static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx, | |
4655 | uint64_t addr) | |
4656 | { | |
4657 | uint64_t tcr = regime_tcr(env, mmu_idx); | |
4658 | int tbi = aa64_va_parameter_tbi(tcr, mmu_idx); | |
4659 | int select = extract64(addr, 55, 1); | |
4660 | ||
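    /*
     * VA bit 55 selects the TTBR1 vs TTBR0 half of the address space,
     * and hence whether TBI1 or TBI0 applies; when the selected TBI
     * bit is set the top byte is ignored, so only 56 address bits are
     * significant for TLB matching.
     */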
4661 | return (tbi >> select) & 1 ? 56 : 64; | |
4662 | } | |
4663 | ||
4664 | static int vae1_tlbbits(CPUARMState *env, uint64_t addr) | |
4665 | { | |
4666 | uint64_t hcr = arm_hcr_el2_eff(env); | |
4667 | ARMMMUIdx mmu_idx; | |
4668 | ||
4669 | /* Only the regime of the mmu_idx below is significant. */ | |
4670 | if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { | |
4671 | mmu_idx = ARMMMUIdx_E20_0; | |
4672 | } else { | |
4673 | mmu_idx = ARMMMUIdx_E10_0; | |
4674 | } | |
4675 | ||
4676 | return tlbbits_for_regime(env, mmu_idx, addr); | |
4677 | } | |
4678 | ||
4679 | static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4680 | uint64_t value) | |
4681 | { | |
4682 | CPUState *cs = env_cpu(env); | |
4683 | int mask = vae1_tlbmask(env); | |
4684 | ||
4685 | tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); | |
4686 | } | |
4687 | ||
4688 | static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4689 | uint64_t value) | |
4690 | { | |
4691 | CPUState *cs = env_cpu(env); | |
4692 | int mask = vae1_tlbmask(env); | |
4693 | ||
4694 | if (tlb_force_broadcast(env)) { | |
4695 | tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); | |
4696 | } else { | |
4697 | tlb_flush_by_mmuidx(cs, mask); | |
4698 | } | |
4699 | } | |
4700 | ||
4701 | static int e2_tlbmask(CPUARMState *env) | |
4702 | { | |
4703 | return (ARMMMUIdxBit_E20_0 | | |
4704 | ARMMMUIdxBit_E20_2 | | |
4705 | ARMMMUIdxBit_E20_2_PAN | | |
4706 | ARMMMUIdxBit_E2); | |
4707 | } | |
4708 | ||
4709 | static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4710 | uint64_t value) | |
4711 | { | |
4712 | CPUState *cs = env_cpu(env); | |
4713 | int mask = alle1_tlbmask(env); | |
4714 | ||
4715 | tlb_flush_by_mmuidx(cs, mask); | |
4716 | } | |
4717 | ||
4718 | static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4719 | uint64_t value) | |
4720 | { | |
4721 | CPUState *cs = env_cpu(env); | |
4722 | int mask = e2_tlbmask(env); | |
4723 | ||
4724 | tlb_flush_by_mmuidx(cs, mask); | |
4725 | } | |
4726 | ||
4727 | static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4728 | uint64_t value) | |
4729 | { | |
4730 | ARMCPU *cpu = env_archcpu(env); | |
4731 | CPUState *cs = CPU(cpu); | |
4732 | ||
4733 | tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3); | |
4734 | } | |
4735 | ||
4736 | static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4737 | uint64_t value) | |
4738 | { | |
4739 | CPUState *cs = env_cpu(env); | |
4740 | int mask = alle1_tlbmask(env); | |
4741 | ||
4742 | tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); | |
4743 | } | |
4744 | ||
4745 | static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4746 | uint64_t value) | |
4747 | { | |
4748 | CPUState *cs = env_cpu(env); | |
4749 | int mask = e2_tlbmask(env); | |
4750 | ||
4751 | tlb_flush_by_mmuidx_all_cpus_synced(cs, mask); | |
4752 | } | |
4753 | ||
4754 | static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4755 | uint64_t value) | |
4756 | { | |
4757 | CPUState *cs = env_cpu(env); | |
4758 | ||
4759 | tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3); | |
4760 | } | |
4761 | ||
4762 | static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4763 | uint64_t value) | |
4764 | { | |
4765 | /* | |
4766 | * Invalidate by VA, EL2 | |
4767 | * Currently handles both VAE2 and VALE2, since we don't support | |
4768 | * flush-last-level-only. | |
4769 | */ | |
4770 | CPUState *cs = env_cpu(env); | |
4771 | int mask = e2_tlbmask(env); | |
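    /*
     * The TLBI argument holds VA[55:12] in bits [43:0]: shifting left
     * by 12 rebuilds the page address, and sextract64(..., 0, 56)
     * sign-extends from bit 55 into canonical form.
     */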
4772 | uint64_t pageaddr = sextract64(value << 12, 0, 56); | |
4773 | ||
4774 | tlb_flush_page_by_mmuidx(cs, pageaddr, mask); | |
4775 | } | |
4776 | ||
4777 | static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4778 | uint64_t value) | |
4779 | { | |
4780 | /* | |
4781 | * Invalidate by VA, EL3 | |
4782 | * Currently handles both VAE3 and VALE3, since we don't support | |
4783 | * flush-last-level-only. | |
4784 | */ | |
4785 | ARMCPU *cpu = env_archcpu(env); | |
4786 | CPUState *cs = CPU(cpu); | |
4787 | uint64_t pageaddr = sextract64(value << 12, 0, 56); | |
4788 | ||
4789 | tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3); | |
4790 | } | |
4791 | ||
4792 | static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4793 | uint64_t value) | |
4794 | { | |
4795 | CPUState *cs = env_cpu(env); | |
4796 | int mask = vae1_tlbmask(env); | |
4797 | uint64_t pageaddr = sextract64(value << 12, 0, 56); | |
4798 | int bits = vae1_tlbbits(env, pageaddr); | |
4799 | ||
4800 | tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); | |
4801 | } | |
4802 | ||
4803 | static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4804 | uint64_t value) | |
4805 | { | |
4806 | /* | |
4807 | * Invalidate by VA, EL1&0 (AArch64 version). | |
4808 | * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1, | |
4809 | * since we don't support flush-for-specific-ASID-only or | |
4810 | * flush-last-level-only. | |
4811 | */ | |
4812 | CPUState *cs = env_cpu(env); | |
4813 | int mask = vae1_tlbmask(env); | |
4814 | uint64_t pageaddr = sextract64(value << 12, 0, 56); | |
4815 | int bits = vae1_tlbbits(env, pageaddr); | |
4816 | ||
4817 | if (tlb_force_broadcast(env)) { | |
4818 | tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, mask, bits); | |
4819 | } else { | |
4820 | tlb_flush_page_bits_by_mmuidx(cs, pageaddr, mask, bits); | |
4821 | } | |
4822 | } | |
4823 | ||
4824 | static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4825 | uint64_t value) | |
4826 | { | |
4827 | CPUState *cs = env_cpu(env); | |
4828 | uint64_t pageaddr = sextract64(value << 12, 0, 56); | |
4829 | int bits = tlbbits_for_regime(env, ARMMMUIdx_E2, pageaddr); | |
4830 | ||
4831 | tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, | |
4832 | ARMMMUIdxBit_E2, bits); | |
4833 | } | |
4834 | ||
4835 | static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4836 | uint64_t value) | |
4837 | { | |
4838 | CPUState *cs = env_cpu(env); | |
4839 | uint64_t pageaddr = sextract64(value << 12, 0, 56); | |
4840 | int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr); | |
4841 | ||
4842 | tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, | |
4843 | ARMMMUIdxBit_E3, bits); | |
4844 | } | |
4845 | ||
4846 | static int ipas2e1_tlbmask(CPUARMState *env, int64_t value) | |
4847 | { | |
4848 | /* | |
4849 | * The MSB of value is the NS field, which only applies if SEL2 | |
4850 | * is implemented and SCR_EL3.NS is not set (i.e. in secure mode). | |
4851 | */ | |
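    /* value is an int64_t, so "value >= 0" tests that bit 63 (NS) is clear. */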
4852 | return (value >= 0 | |
4853 | && cpu_isar_feature(aa64_sel2, env_archcpu(env)) | |
4854 | && arm_is_secure_below_el3(env) | |
4855 | ? ARMMMUIdxBit_Stage2_S | |
4856 | : ARMMMUIdxBit_Stage2); | |
4857 | } | |
4858 | ||
4859 | static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4860 | uint64_t value) | |
4861 | { | |
4862 | CPUState *cs = env_cpu(env); | |
4863 | int mask = ipas2e1_tlbmask(env, value); | |
4864 | uint64_t pageaddr = sextract64(value << 12, 0, 56); | |
4865 | ||
4866 | if (tlb_force_broadcast(env)) { | |
4867 | tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); | |
4868 | } else { | |
4869 | tlb_flush_page_by_mmuidx(cs, pageaddr, mask); | |
4870 | } | |
4871 | } | |
4872 | ||
4873 | static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
4874 | uint64_t value) | |
4875 | { | |
4876 | CPUState *cs = env_cpu(env); | |
4877 | int mask = ipas2e1_tlbmask(env, value); | |
4878 | uint64_t pageaddr = sextract64(value << 12, 0, 56); | |
4879 | ||
4880 | tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask); | |
4881 | } | |
4882 | ||
4883 | #ifdef TARGET_AARCH64 | |
4884 | typedef struct { | |
4885 | uint64_t base; | |
4886 | uint64_t length; | |
4887 | } TLBIRange; | |
4888 | ||
4889 | static ARMGranuleSize tlbi_range_tg_to_gran_size(int tg) | |
4890 | { | |
4891 | /* | |
4892 | * Note that the TLBI range TG field encoding differs from both | |
4893 | * TG0 and TG1 encodings. | |
4894 | */ | |
4895 | switch (tg) { | |
4896 | case 1: | |
4897 | return Gran4K; | |
4898 | case 2: | |
4899 | return Gran16K; | |
4900 | case 3: | |
4901 | return Gran64K; | |
4902 | default: | |
4903 | return GranInvalid; | |
4904 | } | |
4905 | } | |
4906 | ||
4907 | static TLBIRange tlbi_aa64_get_range(CPUARMState *env, ARMMMUIdx mmuidx, | |
4908 | uint64_t value) | |
4909 | { | |
4910 | unsigned int page_size_granule, page_shift, num, scale, exponent; | |
4911 | /* Extract one bit to represent the va selector in use. */ | |
4912 | uint64_t select = sextract64(value, 36, 1); | |
4913 | ARMVAParameters param = aa64_va_parameters(env, select, mmuidx, true, false); | |
4914 | TLBIRange ret = { }; | |
4915 | ARMGranuleSize gran; | |
4916 | ||
4917 | page_size_granule = extract64(value, 46, 2); | |
4918 | gran = tlbi_range_tg_to_gran_size(page_size_granule); | |
4919 | ||
4920 | /* The granule encoded in value must match the granule in use. */ | |
4921 | if (gran != param.gran) { | |
4922 | qemu_log_mask(LOG_GUEST_ERROR, "Invalid tlbi page size granule %d\n", | |
4923 | page_size_granule); | |
4924 | return ret; | |
4925 | } | |
4926 | ||
4927 | page_shift = arm_granule_bits(gran); | |
4928 | num = extract64(value, 39, 5); | |
4929 | scale = extract64(value, 44, 2); | |
4930 | exponent = (5 * scale) + 1; | |
4931 | ||
4932 | ret.length = (num + 1) << (exponent + page_shift); | |
4933 | ||
4934 | if (param.select) { | |
4935 | ret.base = sextract64(value, 0, 37); | |
4936 | } else { | |
4937 | ret.base = extract64(value, 0, 37); | |
4938 | } | |
4939 | if (param.ds) { | |
4940 | /* | |
4941 | * With DS=1, BaseADDR is always shifted 16 so that it is able | |
4942 | * to address all 52 va bits. The input address is perforce | |
4943 | * aligned on a 64k boundary regardless of translation granule. | |
4944 | */ | |
4945 | page_shift = 16; | |
4946 | } | |
4947 | ret.base <<= page_shift; | |
4948 | ||
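    /*
     * Worked example (illustrative values): with a 4K granule
     * (page_shift == 12), SCALE == 1 and NUM == 7, the exponent is 6
     * and length is (7 + 1) << (6 + 12) == 2MB, i.e. 512 consecutive
     * 4K pages starting at BaseADDR << 12.
     */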
4949 | return ret; | |
4950 | } | |
4951 | ||
4952 | static void do_rvae_write(CPUARMState *env, uint64_t value, | |
4953 | int idxmap, bool synced) | |
4954 | { | |
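    /*
     * All mmu indexes in idxmap share the same translation regime
     * parameters, so looking only at the lowest one (via ctz32) is
     * enough to compute the range and the TBI bits.
     */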
4955 | ARMMMUIdx one_idx = ARM_MMU_IDX_A | ctz32(idxmap); | |
4956 | TLBIRange range; | |
4957 | int bits; | |
4958 | ||
4959 | range = tlbi_aa64_get_range(env, one_idx, value); | |
4960 | bits = tlbbits_for_regime(env, one_idx, range.base); | |
4961 | ||
4962 | if (synced) { | |
4963 | tlb_flush_range_by_mmuidx_all_cpus_synced(env_cpu(env), | |
4964 | range.base, | |
4965 | range.length, | |
4966 | idxmap, | |
4967 | bits); | |
4968 | } else { | |
4969 | tlb_flush_range_by_mmuidx(env_cpu(env), range.base, | |
4970 | range.length, idxmap, bits); | |
4971 | } | |
4972 | } | |
4973 | ||
4974 | static void tlbi_aa64_rvae1_write(CPUARMState *env, | |
4975 | const ARMCPRegInfo *ri, | |
4976 | uint64_t value) | |
4977 | { | |
4978 | /* | |
4979 | * Invalidate by VA range, EL1&0. | |
4980 | * Currently handles all of RVAE1, RVAAE1, RVAALE1 and RVALE1, | |
4981 | * since we don't support flush-for-specific-ASID-only or | |
4982 | * flush-last-level-only. | |
4983 | */ | |
4984 | ||
4985 | do_rvae_write(env, value, vae1_tlbmask(env), | |
4986 | tlb_force_broadcast(env)); | |
4987 | } | |
4988 | ||
4989 | static void tlbi_aa64_rvae1is_write(CPUARMState *env, | |
4990 | const ARMCPRegInfo *ri, | |
4991 | uint64_t value) | |
4992 | { | |
4993 | /* | |
4994 | * Invalidate by VA range, Inner/Outer Shareable EL1&0. | |
4995 | * Currently handles all of RVAE1IS, RVAE1OS, RVAAE1IS, RVAAE1OS, | |
4996 | * RVAALE1IS, RVAALE1OS, RVALE1IS and RVALE1OS, since we don't support | |
4997 | * flush-for-specific-ASID-only, flush-last-level-only or inner/outer | |
4998 | * shareable specific flushes. | |
4999 | */ | |
5000 | ||
5001 | do_rvae_write(env, value, vae1_tlbmask(env), true); | |
5002 | } | |
5003 | ||
5004 | static int vae2_tlbmask(CPUARMState *env) | |
5005 | { | |
5006 | return ARMMMUIdxBit_E2; | |
5007 | } | |
5008 | ||
5009 | static void tlbi_aa64_rvae2_write(CPUARMState *env, | |
5010 | const ARMCPRegInfo *ri, | |
5011 | uint64_t value) | |
5012 | { | |
5013 | /* | |
5014 | * Invalidate by VA range, EL2. | |
5015 | * Currently handles all of RVAE2 and RVALE2, | |
5016 | * since we don't support flush-for-specific-ASID-only or | |
5017 | * flush-last-level-only. | |
5018 | */ | |
5019 | ||
5020 | do_rvae_write(env, value, vae2_tlbmask(env), | |
5021 | tlb_force_broadcast(env)); | |
5024 | } | |
5025 | ||
5026 | static void tlbi_aa64_rvae2is_write(CPUARMState *env, | |
5027 | const ARMCPRegInfo *ri, | |
5028 | uint64_t value) | |
5029 | { | |
5030 | /* | |
5031 | * Invalidate by VA range, Inner/Outer Shareable, EL2. | |
5032 | * Currently handles all of RVAE2IS, RVAE2OS, RVALE2IS and RVALE2OS, | |
5033 | * since we don't support flush-for-specific-ASID-only, | |
5034 | * flush-last-level-only or inner/outer shareable specific flushes. | |
5035 | */ | |
5036 | ||
5037 | do_rvae_write(env, value, vae2_tlbmask(env), true); | |
5039 | } | |
5040 | ||
5041 | static void tlbi_aa64_rvae3_write(CPUARMState *env, | |
5042 | const ARMCPRegInfo *ri, | |
5043 | uint64_t value) | |
5044 | { | |
5045 | /* | |
5046 | * Invalidate by VA range, EL3. | |
5047 | * Currently handles all of RVAE3 and RVALE3, | |
5048 | * since we don't support flush-for-specific-ASID-only or | |
5049 | * flush-last-level-only. | |
5050 | */ | |
5051 | ||
5052 | do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env)); | |
5053 | } | |
5054 | ||
5055 | static void tlbi_aa64_rvae3is_write(CPUARMState *env, | |
5056 | const ARMCPRegInfo *ri, | |
5057 | uint64_t value) | |
5058 | { | |
5059 | /* | |
5060 | * Invalidate by VA range, EL3, Inner/Outer Shareable. | |
5061 | * Currently handles all of RVAE3IS, RVAE3OS, RVALE3IS and RVALE3OS, | |
5062 | * since we don't support flush-for-specific-ASID-only, | |
5063 | * flush-last-level-only or inner/outer shareable specific flushes. | |
5064 | */ | |
5065 | ||
5066 | do_rvae_write(env, value, ARMMMUIdxBit_E3, true); | |
5067 | } | |
5068 | ||
5069 | static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
5070 | uint64_t value) | |
5071 | { | |
5072 | do_rvae_write(env, value, ipas2e1_tlbmask(env, value), | |
5073 | tlb_force_broadcast(env)); | |
5074 | } | |
5075 | ||
5076 | static void tlbi_aa64_ripas2e1is_write(CPUARMState *env, | |
5077 | const ARMCPRegInfo *ri, | |
5078 | uint64_t value) | |
5079 | { | |
5080 | do_rvae_write(env, value, ipas2e1_tlbmask(env, value), true); | |
5081 | } | |
5082 | #endif | |
5083 | ||
5084 | static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
5085 | bool isread) | |
5086 | { | |
5087 | int cur_el = arm_current_el(env); | |
5088 | ||
5089 | if (cur_el < 2) { | |
5090 | uint64_t hcr = arm_hcr_el2_eff(env); | |
5091 | ||
5092 | if (cur_el == 0) { | |
5093 | if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { | |
5094 | if (!(env->cp15.sctlr_el[2] & SCTLR_DZE)) { | |
5095 | return CP_ACCESS_TRAP_EL2; | |
5096 | } | |
5097 | } else { | |
5098 | if (!(env->cp15.sctlr_el[1] & SCTLR_DZE)) { | |
5099 | return CP_ACCESS_TRAP; | |
5100 | } | |
5101 | if (hcr & HCR_TDZ) { | |
5102 | return CP_ACCESS_TRAP_EL2; | |
5103 | } | |
5104 | } | |
5105 | } else if (hcr & HCR_TDZ) { | |
5106 | return CP_ACCESS_TRAP_EL2; | |
5107 | } | |
5108 | } | |
5109 | return CP_ACCESS_OK; | |
5110 | } | |
5111 | ||
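/*
 * DCZID_EL0: bits [3:0] (BS) give log2 of the DC ZVA block size in
 * words, and bit 4 (DZP) is set while DC ZVA is prohibited; for
 * example, a dcz_blocksize of 4 reports 16-word (64-byte) blocks.
 */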
5112 | static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
5113 | { | |
5114 | ARMCPU *cpu = env_archcpu(env); | |
5115 | int dzp_bit = 1 << 4; | |
5116 | ||
5117 | /* DZP is set when DC ZVA access is currently prohibited */ | |
5118 | if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) { | |
5119 | dzp_bit = 0; | |
5120 | } | |
5121 | return cpu->dcz_blocksize | dzp_bit; | |
5122 | } | |
5123 | ||
5124 | static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
5125 | bool isread) | |
5126 | { | |
5127 | if (!(env->pstate & PSTATE_SP)) { | |
5128 | /* | |
5129 | * Access to SP_EL0 is undefined if it's being used as | |
5130 | * the stack pointer. | |
5131 | */ | |
5132 | return CP_ACCESS_TRAP_UNCATEGORIZED; | |
5133 | } | |
5134 | return CP_ACCESS_OK; | |
5135 | } | |
5136 | ||
5137 | static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
5138 | { | |
5139 | return env->pstate & PSTATE_SP; | |
5140 | } | |
5141 | ||
5142 | static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) | |
5143 | { | |
5144 | update_spsel(env, val); | |
5145 | } | |
5146 | ||
5147 | static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
5148 | uint64_t value) | |
5149 | { | |
5150 | ARMCPU *cpu = env_archcpu(env); | |
5151 | ||
5152 | if (arm_feature(env, ARM_FEATURE_PMSA) && !cpu->has_mpu) { | |
5153 | /* M bit is RAZ/WI for PMSA with no MPU implemented */ | |
5154 | value &= ~SCTLR_M; | |
5155 | } | |
5156 | ||
5157 | /* ??? Lots of these bits are not implemented. */ | |
5158 | ||
5159 | if (ri->state == ARM_CP_STATE_AA64 && !cpu_isar_feature(aa64_mte, cpu)) { | |
5160 | if (ri->opc1 == 6) { /* SCTLR_EL3 */ | |
5161 | value &= ~(SCTLR_ITFSB | SCTLR_TCF | SCTLR_ATA); | |
5162 | } else { | |
5163 | value &= ~(SCTLR_ITFSB | SCTLR_TCF0 | SCTLR_TCF | | |
5164 | SCTLR_ATA0 | SCTLR_ATA); | |
5165 | } | |
5166 | } | |
5167 | ||
5168 | if (raw_read(env, ri) == value) { | |
5169 | /* | |
5170 | * Skip the TLB flush if nothing actually changed; Linux likes | |
5171 | * to do a lot of pointless SCTLR writes. | |
5172 | */ | |
5173 | return; | |
5174 | } | |
5175 | ||
5176 | raw_write(env, ri, value); | |
5177 | ||
5178 | /* This may enable/disable the MMU, so do a TLB flush. */ | |
5179 | tlb_flush(CPU(cpu)); | |
5180 | ||
5181 | if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) { | |
5182 | /* | |
5183 | * Normally we would always end the TB on an SCTLR write; see the | |
5184 | * comment in ARMCPRegInfo sctlr initialization below for why Xscale | |
5185 | * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild | |
5186 | * of hflags from the translator, so do it here. | |
5187 | */ | |
5188 | arm_rebuild_hflags(env); | |
5189 | } | |
5190 | } | |
5191 | ||
5192 | static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
5193 | uint64_t value) | |
5194 | { | |
5195 | /* | |
5196 | * Some MDCR_EL3 bits affect whether PMU counters are running: | |
5197 | * if we are trying to change any of those then we must | |
5198 | * bracket this update with PMU start/finish calls. | |
5199 | */ | |
5200 | bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS; | |
5201 | ||
5202 | if (pmu_op) { | |
5203 | pmu_op_start(env); | |
5204 | } | |
5205 | env->cp15.mdcr_el3 = value; | |
5206 | if (pmu_op) { | |
5207 | pmu_op_finish(env); | |
5208 | } | |
5209 | } | |
5210 | ||
5211 | static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
5212 | uint64_t value) | |
5213 | { | |
5214 | /* Not all bits defined for MDCR_EL3 exist in the AArch32 SDCR */ | |
5215 | mdcr_el3_write(env, ri, value & SDCR_VALID_MASK); | |
5216 | } | |
5217 | ||
5218 | static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
5219 | uint64_t value) | |
5220 | { | |
5221 | /* | |
5222 | * Some MDCR_EL2 bits affect whether PMU counters are running: | |
5223 | * if we are trying to change any of those then we must | |
5224 | * bracket this update with PMU start/finish calls. | |
5225 | */ | |
5226 | bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS; | |
5227 | ||
5228 | if (pmu_op) { | |
5229 | pmu_op_start(env); | |
5230 | } | |
5231 | env->cp15.mdcr_el2 = value; | |
5232 | if (pmu_op) { | |
5233 | pmu_op_finish(env); | |
5234 | } | |
5235 | } | |
5236 | ||
5237 | static const ARMCPRegInfo v8_cp_reginfo[] = { | |
5238 | /* | |
5239 | * Minimal set of EL0-visible registers. This will need to be expanded | |
5240 | * significantly for system emulation of AArch64 CPUs. | |
5241 | */ | |
5242 | { .name = "NZCV", .state = ARM_CP_STATE_AA64, | |
5243 | .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2, | |
5244 | .access = PL0_RW, .type = ARM_CP_NZCV }, | |
5245 | { .name = "DAIF", .state = ARM_CP_STATE_AA64, | |
5246 | .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2, | |
5247 | .type = ARM_CP_NO_RAW, | |
5248 | .access = PL0_RW, .accessfn = aa64_daif_access, | |
5249 | .fieldoffset = offsetof(CPUARMState, daif), | |
5250 | .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore }, | |
5251 | { .name = "FPCR", .state = ARM_CP_STATE_AA64, | |
5252 | .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4, | |
5253 | .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, | |
5254 | .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write }, | |
5255 | { .name = "FPSR", .state = ARM_CP_STATE_AA64, | |
5256 | .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4, | |
5257 | .access = PL0_RW, .type = ARM_CP_FPU | ARM_CP_SUPPRESS_TB_END, | |
5258 | .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write }, | |
5259 | { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64, | |
5260 | .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0, | |
5261 | .access = PL0_R, .type = ARM_CP_NO_RAW, | |
5262 | .fgt = FGT_DCZID_EL0, | |
5263 | .readfn = aa64_dczid_read }, | |
5264 | { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64, | |
5265 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1, | |
5266 | .access = PL0_W, .type = ARM_CP_DC_ZVA, | |
5267 | #ifndef CONFIG_USER_ONLY | |
5268 | /* Avoid overhead of an access check that always passes in user-mode */ | |
5269 | .accessfn = aa64_zva_access, | |
5270 | .fgt = FGT_DCZVA, | |
5271 | #endif | |
5272 | }, | |
5273 | { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64, | |
5274 | .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2, | |
5275 | .access = PL1_R, .type = ARM_CP_CURRENTEL }, | |
5276 | /* Cache ops: all NOPs since we don't emulate caches */ | |
5277 | { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64, | |
5278 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, | |
5279 | .access = PL1_W, .type = ARM_CP_NOP, | |
5280 | .fgt = FGT_ICIALLUIS, | |
5281 | .accessfn = access_ticab }, | |
5282 | { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64, | |
5283 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, | |
5284 | .access = PL1_W, .type = ARM_CP_NOP, | |
5285 | .fgt = FGT_ICIALLU, | |
5286 | .accessfn = access_tocu }, | |
5287 | { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64, | |
5288 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1, | |
5289 | .access = PL0_W, .type = ARM_CP_NOP, | |
5290 | .fgt = FGT_ICIVAU, | |
5291 | .accessfn = access_tocu }, | |
5292 | { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64, | |
5293 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, | |
5294 | .access = PL1_W, .accessfn = aa64_cacheop_poc_access, | |
5295 | .fgt = FGT_DCIVAC, | |
5296 | .type = ARM_CP_NOP }, | |
5297 | { .name = "DC_ISW", .state = ARM_CP_STATE_AA64, | |
5298 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, | |
5299 | .fgt = FGT_DCISW, | |
5300 | .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, | |
5301 | { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64, | |
5302 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1, | |
5303 | .access = PL0_W, .type = ARM_CP_NOP, | |
5304 | .fgt = FGT_DCCVAC, | |
5305 | .accessfn = aa64_cacheop_poc_access }, | |
5306 | { .name = "DC_CSW", .state = ARM_CP_STATE_AA64, | |
5307 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, | |
5308 | .fgt = FGT_DCCSW, | |
5309 | .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, | |
5310 | { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64, | |
5311 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1, | |
5312 | .access = PL0_W, .type = ARM_CP_NOP, | |
5313 | .fgt = FGT_DCCVAU, | |
5314 | .accessfn = access_tocu }, | |
5315 | { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64, | |
5316 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1, | |
5317 | .access = PL0_W, .type = ARM_CP_NOP, | |
5318 | .fgt = FGT_DCCIVAC, | |
5319 | .accessfn = aa64_cacheop_poc_access }, | |
5320 | { .name = "DC_CISW", .state = ARM_CP_STATE_AA64, | |
5321 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, | |
5322 | .fgt = FGT_DCCISW, | |
5323 | .access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP }, | |
5324 | /* TLBI operations */ | |
5325 | { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64, | |
5326 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0, | |
5327 | .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, | |
5328 | .fgt = FGT_TLBIVMALLE1IS, | |
5329 | .writefn = tlbi_aa64_vmalle1is_write }, | |
5330 | { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64, | |
5331 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1, | |
5332 | .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, | |
5333 | .fgt = FGT_TLBIVAE1IS, | |
5334 | .writefn = tlbi_aa64_vae1is_write }, | |
5335 | { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64, | |
5336 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2, | |
5337 | .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, | |
5338 | .fgt = FGT_TLBIASIDE1IS, | |
5339 | .writefn = tlbi_aa64_vmalle1is_write }, | |
5340 | { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64, | |
5341 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3, | |
5342 | .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, | |
5343 | .fgt = FGT_TLBIVAAE1IS, | |
5344 | .writefn = tlbi_aa64_vae1is_write }, | |
5345 | { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64, | |
5346 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, | |
5347 | .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, | |
5348 | .fgt = FGT_TLBIVALE1IS, | |
5349 | .writefn = tlbi_aa64_vae1is_write }, | |
5350 | { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64, | |
5351 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, | |
5352 | .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, | |
5353 | .fgt = FGT_TLBIVAALE1IS, | |
5354 | .writefn = tlbi_aa64_vae1is_write }, | |
5355 | { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64, | |
5356 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0, | |
5357 | .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, | |
5358 | .fgt = FGT_TLBIVMALLE1, | |
5359 | .writefn = tlbi_aa64_vmalle1_write }, | |
5360 | { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64, | |
5361 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1, | |
5362 | .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, | |
5363 | .fgt = FGT_TLBIVAE1, | |
5364 | .writefn = tlbi_aa64_vae1_write }, | |
5365 | { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64, | |
5366 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2, | |
5367 | .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, | |
5368 | .fgt = FGT_TLBIASIDE1, | |
5369 | .writefn = tlbi_aa64_vmalle1_write }, | |
5370 | { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64, | |
5371 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3, | |
5372 | .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, | |
5373 | .fgt = FGT_TLBIVAAE1, | |
5374 | .writefn = tlbi_aa64_vae1_write }, | |
5375 | { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64, | |
5376 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, | |
5377 | .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, | |
5378 | .fgt = FGT_TLBIVALE1, | |
5379 | .writefn = tlbi_aa64_vae1_write }, | |
5380 | { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64, | |
5381 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, | |
5382 | .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, | |
5383 | .fgt = FGT_TLBIVAALE1, | |
5384 | .writefn = tlbi_aa64_vae1_write }, | |
5385 | { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64, | |
5386 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, | |
5387 | .access = PL2_W, .type = ARM_CP_NO_RAW, | |
5388 | .writefn = tlbi_aa64_ipas2e1is_write }, | |
5389 | { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64, | |
5390 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, | |
5391 | .access = PL2_W, .type = ARM_CP_NO_RAW, | |
5392 | .writefn = tlbi_aa64_ipas2e1is_write }, | |
5393 | { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64, | |
5394 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, | |
5395 | .access = PL2_W, .type = ARM_CP_NO_RAW, | |
5396 | .writefn = tlbi_aa64_alle1is_write }, | |
5397 | { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64, | |
5398 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6, | |
5399 | .access = PL2_W, .type = ARM_CP_NO_RAW, | |
5400 | .writefn = tlbi_aa64_alle1is_write }, | |
5401 | { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64, | |
5402 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, | |
5403 | .access = PL2_W, .type = ARM_CP_NO_RAW, | |
5404 | .writefn = tlbi_aa64_ipas2e1_write }, | |
5405 | { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64, | |
5406 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, | |
5407 | .access = PL2_W, .type = ARM_CP_NO_RAW, | |
5408 | .writefn = tlbi_aa64_ipas2e1_write }, | |
5409 | { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64, | |
5410 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, | |
5411 | .access = PL2_W, .type = ARM_CP_NO_RAW, | |
5412 | .writefn = tlbi_aa64_alle1_write }, | |
5413 | { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64, | |
5414 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6, | |
5415 | .access = PL2_W, .type = ARM_CP_NO_RAW, | |
5416 | .writefn = tlbi_aa64_alle1is_write }, | |
5417 | #ifndef CONFIG_USER_ONLY | |
5418 | /* 64 bit address translation operations */ | |
5419 | { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64, | |
5420 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, | |
5421 | .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, | |
5422 | .fgt = FGT_ATS1E1R, | |
5423 | .writefn = ats_write64 }, | |
5424 | { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, | |
5425 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, | |
5426 | .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, | |
5427 | .fgt = FGT_ATS1E1W, | |
5428 | .writefn = ats_write64 }, | |
5429 | { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, | |
5430 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, | |
5431 | .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, | |
5432 | .fgt = FGT_ATS1E0R, | |
5433 | .writefn = ats_write64 }, | |
5434 | { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, | |
5435 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, | |
5436 | .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, | |
5437 | .fgt = FGT_ATS1E0W, | |
5438 | .writefn = ats_write64 }, | |
5439 | { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, | |
5440 | .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, | |
5441 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, | |
5442 | .writefn = ats_write64 }, | |
5443 | { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64, | |
5444 | .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5, | |
5445 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, | |
5446 | .writefn = ats_write64 }, | |
5447 | { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64, | |
5448 | .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6, | |
5449 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, | |
5450 | .writefn = ats_write64 }, | |
5451 | { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64, | |
5452 | .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7, | |
5453 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, | |
5454 | .writefn = ats_write64 }, | |
5455 | /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */ | |
5456 | { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64, | |
5457 | .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0, | |
5458 | .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, | |
5459 | .writefn = ats_write64 }, | |
5460 | { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64, | |
5461 | .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1, | |
5462 | .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, | |
5463 | .writefn = ats_write64 }, | |
5464 | { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64, | |
5465 | .type = ARM_CP_ALIAS, | |
5466 | .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0, | |
5467 | .access = PL1_RW, .resetvalue = 0, | |
5468 | .fgt = FGT_PAR_EL1, | |
5469 | .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]), | |
5470 | .writefn = par_write }, | |
5471 | #endif | |
5472 | /* TLB invalidate last level of translation table walk */ | |
5473 | { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5, | |
5474 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, | |
5475 | .writefn = tlbimva_is_write }, | |
5476 | { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7, | |
5477 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlbis, | |
5478 | .writefn = tlbimvaa_is_write }, | |
5479 | { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5, | |
5480 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, | |
5481 | .writefn = tlbimva_write }, | |
5482 | { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7, | |
5483 | .type = ARM_CP_NO_RAW, .access = PL1_W, .accessfn = access_ttlb, | |
5484 | .writefn = tlbimvaa_write }, | |
5485 | { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, | |
5486 | .type = ARM_CP_NO_RAW, .access = PL2_W, | |
5487 | .writefn = tlbimva_hyp_write }, | |
5488 | { .name = "TLBIMVALHIS", | |
5489 | .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, | |
5490 | .type = ARM_CP_NO_RAW, .access = PL2_W, | |
5491 | .writefn = tlbimva_hyp_is_write }, | |
5492 | { .name = "TLBIIPAS2", | |
5493 | .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1, | |
5494 | .type = ARM_CP_NO_RAW, .access = PL2_W, | |
5495 | .writefn = tlbiipas2_hyp_write }, | |
5496 | { .name = "TLBIIPAS2IS", | |
5497 | .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1, | |
5498 | .type = ARM_CP_NO_RAW, .access = PL2_W, | |
5499 | .writefn = tlbiipas2is_hyp_write }, | |
5500 | { .name = "TLBIIPAS2L", | |
5501 | .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5, | |
5502 | .type = ARM_CP_NO_RAW, .access = PL2_W, | |
5503 | .writefn = tlbiipas2_hyp_write }, | |
5504 | { .name = "TLBIIPAS2LIS", | |
5505 | .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5, | |
5506 | .type = ARM_CP_NO_RAW, .access = PL2_W, | |
5507 | .writefn = tlbiipas2is_hyp_write }, | |
5508 | /* 32 bit cache operations */ | |
5509 | { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0, | |
5510 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab }, | |
5511 | { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6, | |
5512 | .type = ARM_CP_NOP, .access = PL1_W }, | |
5513 | { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0, | |
5514 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu }, | |
5515 | { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1, | |
5516 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu }, | |
5517 | { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6, | |
5518 | .type = ARM_CP_NOP, .access = PL1_W }, | |
5519 | { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7, | |
5520 | .type = ARM_CP_NOP, .access = PL1_W }, | |
5521 | { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1, | |
5522 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, | |
5523 | { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2, | |
5524 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, | |
5525 | { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1, | |
5526 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, | |
5527 | { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2, | |
5528 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, | |
5529 | { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1, | |
5530 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tocu }, | |
5531 | { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1, | |
5532 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = aa64_cacheop_poc_access }, | |
5533 | { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2, | |
5534 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, | |
5535 | /* MMU Domain access control / MPU write buffer control */ | |
5536 | { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0, | |
5537 | .access = PL1_RW, .accessfn = access_tvm_trvm, .resetvalue = 0, | |
5538 | .writefn = dacr_write, .raw_writefn = raw_write, | |
5539 | .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s), | |
5540 | offsetoflow32(CPUARMState, cp15.dacr_ns) } }, | |
5541 | { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, | |
5542 | .type = ARM_CP_ALIAS, | |
5543 | .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, | |
5544 | .access = PL1_RW, | |
5545 | .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, | |
5546 | { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, | |
5547 | .type = ARM_CP_ALIAS, | |
5548 | .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, | |
5549 | .access = PL1_RW, | |
5550 | .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, | |
5551 | /* | |
5552 | * We rely on the access checks not allowing the guest to write to the | |
5553 | * state field when SPSel indicates that it's being used as the stack | |
5554 | * pointer. | |
5555 | */ | |
5556 | { .name = "SP_EL0", .state = ARM_CP_STATE_AA64, | |
5557 | .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0, | |
5558 | .access = PL1_RW, .accessfn = sp_el0_access, | |
5559 | .type = ARM_CP_ALIAS, | |
5560 | .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, | |
5561 | { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, | |
5562 | .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, | |
5563 | .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP, | |
5564 | .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, | |
5565 | { .name = "SPSel", .state = ARM_CP_STATE_AA64, | |
5566 | .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, | |
5567 | .type = ARM_CP_NO_RAW, | |
5568 | .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, | |
5569 | { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, | |
5570 | .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, | |
5571 | .access = PL2_RW, | |
5572 | .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP, | |
5573 | .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) }, | |
5574 | { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, | |
5575 | .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, | |
5576 | .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, | |
5577 | .writefn = dacr_write, .raw_writefn = raw_write, | |
5578 | .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, | |
5579 | { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, | |
5580 | .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, | |
5581 | .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, | |
5582 | .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, | |
5583 | { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, | |
5584 | .type = ARM_CP_ALIAS, | |
5585 | .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, | |
5586 | .access = PL2_RW, | |
5587 | .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) }, | |
5588 | { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64, | |
5589 | .type = ARM_CP_ALIAS, | |
5590 | .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1, | |
5591 | .access = PL2_RW, | |
5592 | .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) }, | |
5593 | { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64, | |
5594 | .type = ARM_CP_ALIAS, | |
5595 | .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2, | |
5596 | .access = PL2_RW, | |
5597 | .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) }, | |
5598 | { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64, | |
5599 | .type = ARM_CP_ALIAS, | |
5600 | .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3, | |
5601 | .access = PL2_RW, | |
5602 | .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) }, | |
5603 | { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64, | |
5604 | .type = ARM_CP_IO, | |
5605 | .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1, | |
5606 | .resetvalue = 0, | |
5607 | .access = PL3_RW, | |
5608 | .writefn = mdcr_el3_write, | |
5609 | .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) }, | |
5610 | { .name = "SDCR", .type = ARM_CP_ALIAS | ARM_CP_IO, | |
5611 | .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1, | |
5612 | .access = PL1_RW, .accessfn = access_trap_aa32s_el1, | |
5613 | .writefn = sdcr_write, | |
5614 | .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, | |
5615 | }; | |
5616 | ||
5617 | static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) | |
5618 | { | |
5619 | ARMCPU *cpu = env_archcpu(env); | |
5620 | ||
5621 | if (arm_feature(env, ARM_FEATURE_V8)) { | |
5622 | valid_mask |= MAKE_64BIT_MASK(0, 34); /* ARMv8.0 */ | |
5623 | } else { | |
5624 | valid_mask |= MAKE_64BIT_MASK(0, 28); /* ARMv7VE */ | |
5625 | } | |
5626 | ||
5627 | if (arm_feature(env, ARM_FEATURE_EL3)) { | |
5628 | valid_mask &= ~HCR_HCD; | |
5629 | } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { | |
5630 | /* | |
5631 | * Architecturally HCR.TSC is RES0 if EL3 is not implemented. | |
5632 | * However, if we're using the SMC PSCI conduit then QEMU is | |
5633 | * effectively acting like EL3 firmware and so the guest at | |
5634 | * EL2 should retain the ability to prevent EL1 from being | |
5635 | * able to make SMC calls into the ersatz firmware, so in | |
5636 | * that case HCR.TSC should be read/write. | |
5637 | */ | |
5638 | valid_mask &= ~HCR_TSC; | |
5639 | } | |
5640 | ||
5641 | if (arm_feature(env, ARM_FEATURE_AARCH64)) { | |
5642 | if (cpu_isar_feature(aa64_vh, cpu)) { | |
5643 | valid_mask |= HCR_E2H; | |
5644 | } | |
5645 | if (cpu_isar_feature(aa64_ras, cpu)) { | |
5646 | valid_mask |= HCR_TERR | HCR_TEA; | |
5647 | } | |
5648 | if (cpu_isar_feature(aa64_lor, cpu)) { | |
5649 | valid_mask |= HCR_TLOR; | |
5650 | } | |
5651 | if (cpu_isar_feature(aa64_pauth, cpu)) { | |
5652 | valid_mask |= HCR_API | HCR_APK; | |
5653 | } | |
5654 | if (cpu_isar_feature(aa64_mte, cpu)) { | |
5655 | valid_mask |= HCR_ATA | HCR_DCT | HCR_TID5; | |
5656 | } | |
5657 | if (cpu_isar_feature(aa64_scxtnum, cpu)) { | |
5658 | valid_mask |= HCR_ENSCXT; | |
5659 | } | |
5660 | if (cpu_isar_feature(aa64_fwb, cpu)) { | |
5661 | valid_mask |= HCR_FWB; | |
5662 | } | |
5663 | if (cpu_isar_feature(aa64_rme, cpu)) { | |
5664 | valid_mask |= HCR_GPF; | |
5665 | } | |
5666 | } | |
5667 | ||
5668 | if (cpu_isar_feature(any_evt, cpu)) { | |
5669 | valid_mask |= HCR_TTLBIS | HCR_TTLBOS | HCR_TICAB | HCR_TOCU | HCR_TID4; | |
5670 | } else if (cpu_isar_feature(any_half_evt, cpu)) { | |
5671 | valid_mask |= HCR_TICAB | HCR_TOCU | HCR_TID4; | |
5672 | } | |
5673 | ||
5674 | /* Clear RES0 bits. */ | |
5675 | value &= valid_mask; | |
5676 | ||
5677 | /* | |
5678 | * These bits change the MMU setup: | |
5679 | * HCR_VM enables stage 2 translation | |
5680 | * HCR_PTW forbids certain page-table setups | |
5681 | * HCR_DC disables stage1 and enables stage2 translation | |
5682 | * HCR_DCT enables tagging on (disabled) stage1 translation | |
5683 | * HCR_FWB changes the interpretation of stage2 descriptor bits | |
5684 | */ | |
5685 | if ((env->cp15.hcr_el2 ^ value) & | |
5686 | (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB)) { | |
5687 | tlb_flush(CPU(cpu)); | |
5688 | } | |
5689 | env->cp15.hcr_el2 = value; | |
5690 | ||
5691 | /* | |
5692 | * Updates to VI and VF require us to update the status of | |
5693 | * virtual interrupts, which are the logical OR of these bits | |
5694 | * and the state of the input lines from the GIC. (This requires | |
5695 | * that we have the iothread lock, which is done by marking the | |
5696 | * reginfo structs as ARM_CP_IO.) | |
5697 | * Note that if a write to HCR pends a VIRQ or VFIQ it is never | |
5698 | * possible for it to be taken immediately, because VIRQ and | |
5699 | * VFIQ are masked unless running at EL0 or EL1, and HCR | |
5700 | * can only be written at EL2. | |
5701 | */ | |
5702 | g_assert(qemu_mutex_iothread_locked()); | |
5703 | arm_cpu_update_virq(cpu); | |
5704 | arm_cpu_update_vfiq(cpu); | |
5705 | arm_cpu_update_vserr(cpu); | |
5706 | } | |
5707 | ||
5708 | static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) | |
5709 | { | |
5710 | do_hcr_write(env, value, 0); | |
5711 | } | |
5712 | ||
5713 | static void hcr_writehigh(CPUARMState *env, const ARMCPRegInfo *ri, | |
5714 | uint64_t value) | |
5715 | { | |
5716 | /* Handle HCR2 write, i.e. write to high half of HCR_EL2 */ | |
5717 | value = deposit64(env->cp15.hcr_el2, 32, 32, value); | |
5718 | do_hcr_write(env, value, MAKE_64BIT_MASK(0, 32)); | |
5719 | } | |
5720 | ||
5721 | static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri, | |
5722 | uint64_t value) | |
5723 | { | |
5724 | /* Handle HCR write, i.e. write to low half of HCR_EL2 */ | |
5725 | value = deposit64(env->cp15.hcr_el2, 0, 32, value); | |
5726 | do_hcr_write(env, value, MAKE_64BIT_MASK(32, 32)); | |
5727 | } | |
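/*
 * As an illustration (the values below are arbitrary and only show the
 * arithmetic): if hcr_el2 currently holds 0x100000000 then a 32-bit
 * guest write of 0x1 through HCR gives
 *     deposit64(0x100000000, 0, 32, 0x1) == 0x100000001,
 * i.e. the other half of the register is preserved, and the valid_mask
 * argument passed to do_hcr_write() marks that untouched half as
 * already valid so it is not clipped by the RES0 masking there.
 */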
5728 | ||
5729 | /* | |
5730 | * Return the effective value of HCR_EL2, at the given security state. | |
5731 | * Bits that are not included here: | |
5732 | * RW (read from SCR_EL3.RW as needed) | |
5733 | */ | |
5734 | uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, bool secure) | |
5735 | { | |
5736 | uint64_t ret = env->cp15.hcr_el2; | |
5737 | ||
5738 | if (!arm_is_el2_enabled_secstate(env, secure)) { | |
5739 | /* | |
5740 | * "This register has no effect if EL2 is not enabled in the | |
5741 | * current Security state". This is ARMv8.4-SecEL2 speak for | |
5742 | * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1). | |
5743 | * | |
5744 | * Prior to that, the language was "In an implementation that | |
5745 | * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves | |
5746 | * as if this field is 0 for all purposes other than a direct | |
5747 | * read or write access of HCR_EL2". With lots of enumeration | |
5748 | * on a per-field basis. In current QEMU, this condition | |
5749 | * is arm_is_secure_below_el3. | |
5750 | * | |
5751 | * Since the v8.4 language applies to the entire register, and | |
5752 | * appears to be backward compatible, use that. | |
5753 | */ | |
5754 | return 0; | |
5755 | } | |
5756 | ||
5757 | /* | |
5758 | * For a cpu that supports both aarch64 and aarch32, we can set bits | |
5759 | * in HCR_EL2 (e.g. via EL3) that are RES0 when we enter EL2 as aa32. | |
5760 | * Ignore all of the bits in HCR+HCR2 that are not valid for aarch32. | |
5761 | */ | |
5762 | if (!arm_el_is_aa64(env, 2)) { | |
5763 | uint64_t aa32_valid; | |
5764 | ||
5765 | /* | |
5766 | * These bits are up-to-date as of ARMv8.6. | |
5767 | * For HCR, it's easiest to list just the 2 bits that are invalid. | |
5768 | * For HCR2, list those that are valid. | |
5769 | */ | |
5770 | aa32_valid = MAKE_64BIT_MASK(0, 32) & ~(HCR_RW | HCR_TDZ); | |
5771 | aa32_valid |= (HCR_CD | HCR_ID | HCR_TERR | HCR_TEA | HCR_MIOCNCE | | |
5772 | HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_TTLBIS); | |
5773 | ret &= aa32_valid; | |
5774 | } | |
5775 | ||
5776 | if (ret & HCR_TGE) { | |
5777 | /* These bits are up-to-date as of ARMv8.6. */ | |
5778 | if (ret & HCR_E2H) { | |
5779 | ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO | | |
5780 | HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE | | |
5781 | HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU | | |
5782 | HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE | | |
5783 | HCR_TID4 | HCR_TICAB | HCR_TOCU | HCR_ENSCXT | | |
5784 | HCR_TTLBIS | HCR_TTLBOS | HCR_TID5); | |
5785 | } else { | |
5786 | ret |= HCR_FMO | HCR_IMO | HCR_AMO; | |
5787 | } | |
5788 | ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE | | |
5789 | HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR | | |
5790 | HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM | | |
5791 | HCR_TLOR); | |
5792 | } | |
5793 | ||
5794 | return ret; | |
5795 | } | |
5796 | ||
5797 | uint64_t arm_hcr_el2_eff(CPUARMState *env) | |
5798 | { | |
5799 | if (arm_feature(env, ARM_FEATURE_M)) { | |
5800 | return 0; | |
5801 | } | |
5802 | return arm_hcr_el2_eff_secstate(env, arm_is_secure_below_el3(env)); | |
5803 | } | |
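/*
 * As an illustration (register values chosen arbitrarily): with EL2
 * enabled, E2H clear and HCR_EL2 == HCR_TGE | HCR_VI, the value
 * returned above has VI (and VF, VSE) cleared but FMO, IMO and AMO
 * set: TGE suppresses the virtual interrupt controls and routes
 * physical interrupts to EL2 regardless of what was written to
 * HCR_EL2.
 */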
5804 | ||
5805 | /* | |
5806 | * Corresponds to ARM pseudocode function ELIsInHost(). | |
5807 | */ | |
5808 | bool el_is_in_host(CPUARMState *env, int el) | |
5809 | { | |
5810 | uint64_t mask; | |
5811 | ||
5812 | /* | |
5813 | * Since we only care about E2H and TGE, we can skip arm_hcr_el2_eff(). | |
5814 | * Perform the simplest bit tests first, and validate EL2 afterward. | |
5815 | */ | |
5816 | if (el & 1) { | |
5817 | return false; /* EL1 or EL3 */ | |
5818 | } | |
5819 | ||
5820 | /* | |
5821 | * Note that hcr_write() checks isar_feature_aa64_vh(), | |
5822 | * aka HaveVirtHostExt(), in allowing HCR_E2H to be set. | |
5823 | */ | |
5824 | mask = el ? HCR_E2H : HCR_E2H | HCR_TGE; | |
5825 | if ((env->cp15.hcr_el2 & mask) != mask) { | |
5826 | return false; | |
5827 | } | |
5828 | ||
5829 | /* TGE and/or E2H set: double check those bits are currently legal. */ | |
5830 | return arm_is_el2_enabled(env) && arm_el_is_aa64(env, 2); | |
5831 | } | |
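/*
 * Put another way: only EL2 (E2H set) and EL0 (E2H and TGE both set)
 * can be "in host"; the (el & 1) test above rejects EL1 and EL3
 * without needing to consult HCR_EL2 at all.
 */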
5832 | ||
5833 | static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
5834 | uint64_t value) | |
5835 | { | |
5836 | uint64_t valid_mask = 0; | |
5837 | ||
5838 | /* No features adding bits to HCRX are implemented. */ | |
5839 | ||
5840 | /* Clear RES0 bits. */ | |
5841 | env->cp15.hcrx_el2 = value & valid_mask; | |
5842 | } | |
5843 | ||
5844 | static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri, | |
5845 | bool isread) | |
5846 | { | |
5847 | if (arm_current_el(env) < 3 | |
5848 | && arm_feature(env, ARM_FEATURE_EL3) | |
5849 | && !(env->cp15.scr_el3 & SCR_HXEN)) { | |
5850 | return CP_ACCESS_TRAP_EL3; | |
5851 | } | |
5852 | return CP_ACCESS_OK; | |
5853 | } | |
5854 | ||
5855 | static const ARMCPRegInfo hcrx_el2_reginfo = { | |
5856 | .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64, | |
5857 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2, | |
5858 | .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen, | |
5859 | .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2), | |
5860 | }; | |
5861 | ||
5862 | /* Return the effective value of HCRX_EL2. */ | |
5863 | uint64_t arm_hcrx_el2_eff(CPUARMState *env) | |
5864 | { | |
5865 | /* | |
5866 | * The bits in this register behave as 0 for all purposes other than | |
5867 | * direct reads of the register if: | |
5868 | * - EL2 is not enabled in the current security state, | |
5869 | * - SCR_EL3.HXEn is 0. | |
5870 | */ | |
5871 | if (!arm_is_el2_enabled(env) | |
5872 | || (arm_feature(env, ARM_FEATURE_EL3) | |
5873 | && !(env->cp15.scr_el3 & SCR_HXEN))) { | |
5874 | return 0; | |
5875 | } | |
5876 | return env->cp15.hcrx_el2; | |
5877 | } | |
5878 | ||
5879 | static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
5880 | uint64_t value) | |
5881 | { | |
5882 | /* | |
5883 | * For A-profile AArch32 EL3, if NSACR.CP10 | |
5884 | * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. | |
5885 | */ | |
5886 | if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && | |
5887 | !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { | |
5888 | uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK; | |
5889 | value = (value & ~mask) | (env->cp15.cptr_el[2] & mask); | |
5890 | } | |
5891 | env->cp15.cptr_el[2] = value; | |
5892 | } | |
5893 | ||
5894 | static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
5895 | { | |
5896 | /* | |
5897 | * For A-profile AArch32 EL3, if NSACR.CP10 | |
5898 | * is 0 then HCPTR.{TCP11,TCP10} ignore writes and read as 1. | |
5899 | */ | |
5900 | uint64_t value = env->cp15.cptr_el[2]; | |
5901 | ||
5902 | if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && | |
5903 | !arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) { | |
5904 | value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK; | |
5905 | } | |
5906 | return value; | |
5907 | } | |
5908 | ||
5909 | static const ARMCPRegInfo el2_cp_reginfo[] = { | |
5910 | { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64, | |
5911 | .type = ARM_CP_IO, | |
5912 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, | |
5913 | .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), | |
5914 | .writefn = hcr_write }, | |
5915 | { .name = "HCR", .state = ARM_CP_STATE_AA32, | |
5916 | .type = ARM_CP_ALIAS | ARM_CP_IO, | |
5917 | .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, | |
5918 | .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), | |
5919 | .writefn = hcr_writelow }, | |
5920 | { .name = "HACR_EL2", .state = ARM_CP_STATE_BOTH, | |
5921 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, | |
5922 | .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | |
5923 | { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, | |
5924 | .type = ARM_CP_ALIAS, | |
5925 | .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, | |
5926 | .access = PL2_RW, | |
5927 | .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, | |
5928 | { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, | |
5929 | .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, | |
5930 | .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, | |
5931 | { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, | |
5932 | .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, | |
5933 | .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, | |
5934 | { .name = "HIFAR", .state = ARM_CP_STATE_AA32, | |
5935 | .type = ARM_CP_ALIAS, | |
5936 | .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 2, | |
5937 | .access = PL2_RW, | |
5938 | .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, | |
5939 | { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, | |
5940 | .type = ARM_CP_ALIAS, | |
5941 | .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, | |
5942 | .access = PL2_RW, | |
5943 | .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, | |
5944 | { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, | |
5945 | .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, | |
5946 | .access = PL2_RW, .writefn = vbar_write, | |
5947 | .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]), | |
5948 | .resetvalue = 0 }, | |
5949 | { .name = "SP_EL2", .state = ARM_CP_STATE_AA64, | |
5950 | .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0, | |
5951 | .access = PL3_RW, .type = ARM_CP_ALIAS, | |
5952 | .fieldoffset = offsetof(CPUARMState, sp_el[2]) }, | |
5953 | { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH, | |
5954 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2, | |
5955 | .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0, | |
5956 | .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]), | |
5957 | .readfn = cptr_el2_read, .writefn = cptr_el2_write }, | |
5958 | { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH, | |
5959 | .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0, | |
5960 | .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]), | |
5961 | .resetvalue = 0 }, | |
5962 | { .name = "HMAIR1", .state = ARM_CP_STATE_AA32, | |
5963 | .cp = 15, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1, | |
5964 | .access = PL2_RW, .type = ARM_CP_ALIAS, | |
5965 | .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) }, | |
5966 | { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH, | |
5967 | .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0, | |
5968 | .access = PL2_RW, .type = ARM_CP_CONST, | |
5969 | .resetvalue = 0 }, | |
5970 | /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */ | |
5971 | { .name = "HAMAIR1", .state = ARM_CP_STATE_AA32, | |
5972 | .cp = 15, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1, | |
5973 | .access = PL2_RW, .type = ARM_CP_CONST, | |
5974 | .resetvalue = 0 }, | |
5975 | { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH, | |
5976 | .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0, | |
5977 | .access = PL2_RW, .type = ARM_CP_CONST, | |
5978 | .resetvalue = 0 }, | |
5979 | { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH, | |
5980 | .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1, | |
5981 | .access = PL2_RW, .type = ARM_CP_CONST, | |
5982 | .resetvalue = 0 }, | |
5983 | { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH, | |
5984 | .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2, | |
5985 | .access = PL2_RW, .writefn = vmsa_tcr_el12_write, | |
5986 | .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) }, | |
5987 | { .name = "VTCR", .state = ARM_CP_STATE_AA32, | |
5988 | .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, | |
5989 | .type = ARM_CP_ALIAS, | |
5990 | .access = PL2_RW, .accessfn = access_el3_aa32ns, | |
5991 | .fieldoffset = offsetoflow32(CPUARMState, cp15.vtcr_el2) }, | |
5992 | { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, | |
5993 | .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, | |
5994 | .access = PL2_RW, | |
5995 | /* no .writefn needed as this can't cause an ASID change */ | |
5996 | .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, | |
5997 | { .name = "VTTBR", .state = ARM_CP_STATE_AA32, | |
5998 | .cp = 15, .opc1 = 6, .crm = 2, | |
5999 | .type = ARM_CP_64BIT | ARM_CP_ALIAS, | |
6000 | .access = PL2_RW, .accessfn = access_el3_aa32ns, | |
6001 | .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2), | |
6002 | .writefn = vttbr_write }, | |
6003 | { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, | |
6004 | .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, | |
6005 | .access = PL2_RW, .writefn = vttbr_write, | |
6006 | .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, | |
6007 | { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, | |
6008 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, | |
6009 | .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write, | |
6010 | .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) }, | |
6011 | { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, | |
6012 | .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, | |
6013 | .access = PL2_RW, .resetvalue = 0, | |
6014 | .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) }, | |
6015 | { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, | |
6016 | .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, | |
6017 | .access = PL2_RW, .resetvalue = 0, .writefn = vmsa_tcr_ttbr_el2_write, | |
6018 | .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, | |
6019 | { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2, | |
6020 | .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS, | |
6021 | .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) }, | |
6022 | { .name = "TLBIALLNSNH", | |
6023 | .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4, | |
6024 | .type = ARM_CP_NO_RAW, .access = PL2_W, | |
6025 | .writefn = tlbiall_nsnh_write }, | |
6026 | { .name = "TLBIALLNSNHIS", | |
6027 | .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4, | |
6028 | .type = ARM_CP_NO_RAW, .access = PL2_W, | |
6029 | .writefn = tlbiall_nsnh_is_write }, | |
6030 | { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, | |
6031 | .type = ARM_CP_NO_RAW, .access = PL2_W, | |
6032 | .writefn = tlbiall_hyp_write }, | |
6033 | { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, | |
6034 | .type = ARM_CP_NO_RAW, .access = PL2_W, | |
6035 | .writefn = tlbiall_hyp_is_write }, | |
6036 | { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, | |
6037 | .type = ARM_CP_NO_RAW, .access = PL2_W, | |
6038 | .writefn = tlbimva_hyp_write }, | |
6039 | { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, | |
6040 | .type = ARM_CP_NO_RAW, .access = PL2_W, | |
6041 | .writefn = tlbimva_hyp_is_write }, | |
6042 | { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64, | |
6043 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0, | |
6044 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
6045 | .writefn = tlbi_aa64_alle2_write }, | |
6046 | { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64, | |
6047 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1, | |
6048 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
6049 | .writefn = tlbi_aa64_vae2_write }, | |
6050 | { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64, | |
6051 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5, | |
6052 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
6053 | .writefn = tlbi_aa64_vae2_write }, | |
6054 | { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64, | |
6055 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0, | |
6056 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
6057 | .writefn = tlbi_aa64_alle2is_write }, | |
6058 | { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64, | |
6059 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1, | |
6060 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
6061 | .writefn = tlbi_aa64_vae2is_write }, | |
6062 | { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64, | |
6063 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5, | |
6064 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
6065 | .writefn = tlbi_aa64_vae2is_write }, | |
6066 | #ifndef CONFIG_USER_ONLY | |
6067 | /* | |
6068 | * Unlike the other EL2-related AT operations, these must | |
6069 | * UNDEF from EL3 if EL2 is not implemented, which is why we | |
6070 | * define them here rather than with the rest of the AT ops. | |
6071 | */ | |
6072 | { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64, | |
6073 | .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, | |
6074 | .access = PL2_W, .accessfn = at_s1e2_access, | |
6075 | .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF, | |
6076 | .writefn = ats_write64 }, | |
6077 | { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64, | |
6078 | .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, | |
6079 | .access = PL2_W, .accessfn = at_s1e2_access, | |
6080 | .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF, | |
6081 | .writefn = ats_write64 }, | |
6082 | /* | |
6083 | * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE | |
6084 | * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3 | |
6085 | * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose | |
6086 | * to behave as if SCR.NS was 1. | |
6087 | */ | |
6088 | { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0, | |
6089 | .access = PL2_W, | |
6090 | .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, | |
6091 | { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1, | |
6092 | .access = PL2_W, | |
6093 | .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC }, | |
6094 | { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH, | |
6095 | .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0, | |
6096 | /* | |
6097 | * ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the | |
6098 | * reset values as IMPDEF. We choose to reset to 3 to comply with | |
6099 | * both ARMv7 and ARMv8. | |
6100 | */ | |
6101 | .access = PL2_RW, .resetvalue = 3, | |
6102 | .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) }, | |
6103 | { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64, | |
6104 | .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, | |
6105 | .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, | |
6106 | .writefn = gt_cntvoff_write, | |
6107 | .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, | |
6108 | { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, | |
6109 | .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, | |
6110 | .writefn = gt_cntvoff_write, | |
6111 | .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, | |
6112 | { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64, | |
6113 | .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2, | |
6114 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), | |
6115 | .type = ARM_CP_IO, .access = PL2_RW, | |
6116 | .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, | |
6117 | { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14, | |
6118 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval), | |
6119 | .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO, | |
6120 | .writefn = gt_hyp_cval_write, .raw_writefn = raw_write }, | |
6121 | { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH, | |
6122 | .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0, | |
6123 | .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, | |
6124 | .resetfn = gt_hyp_timer_reset, | |
6125 | .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write }, | |
6126 | { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH, | |
6127 | .type = ARM_CP_IO, | |
6128 | .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1, | |
6129 | .access = PL2_RW, | |
6130 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl), | |
6131 | .resetvalue = 0, | |
6132 | .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write }, | |
6133 | #endif | |
6134 | { .name = "HPFAR", .state = ARM_CP_STATE_AA32, | |
6135 | .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, | |
6136 | .access = PL2_RW, .accessfn = access_el3_aa32ns, | |
6137 | .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, | |
6138 | { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64, | |
6139 | .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4, | |
6140 | .access = PL2_RW, | |
6141 | .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) }, | |
6142 | { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, | |
6143 | .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, | |
6144 | .access = PL2_RW, | |
6145 | .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, | |
6146 | }; | |
6147 | ||
6148 | static const ARMCPRegInfo el2_v8_cp_reginfo[] = { | |
6149 | { .name = "HCR2", .state = ARM_CP_STATE_AA32, | |
6150 | .type = ARM_CP_ALIAS | ARM_CP_IO, | |
6151 | .cp = 15, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, | |
6152 | .access = PL2_RW, | |
6153 | .fieldoffset = offsetofhigh32(CPUARMState, cp15.hcr_el2), | |
6154 | .writefn = hcr_writehigh }, | |
6155 | }; | |
6156 | ||
6157 | static CPAccessResult sel2_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
6158 | bool isread) | |
6159 | { | |
6160 | if (arm_current_el(env) == 3 || arm_is_secure_below_el3(env)) { | |
6161 | return CP_ACCESS_OK; | |
6162 | } | |
6163 | return CP_ACCESS_TRAP_UNCATEGORIZED; | |
6164 | } | |
6165 | ||
6166 | static const ARMCPRegInfo el2_sec_cp_reginfo[] = { | |
6167 | { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64, | |
6168 | .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0, | |
6169 | .access = PL2_RW, .accessfn = sel2_access, | |
6170 | .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) }, | |
6171 | { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64, | |
6172 | .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2, | |
6173 | .access = PL2_RW, .accessfn = sel2_access, | |
6174 | .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) }, | |
6175 | }; | |
6176 | ||
6177 | static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
6178 | bool isread) | |
6179 | { | |
6180 | /* | |
6181 | * The NSACR is RW at EL3, and RO for NS EL1 and NS EL2. | |
6182 | * At Secure EL1 it traps to EL3 or EL2. | |
6183 | */ | |
6184 | if (arm_current_el(env) == 3) { | |
6185 | return CP_ACCESS_OK; | |
6186 | } | |
6187 | if (arm_is_secure_below_el3(env)) { | |
6188 | if (env->cp15.scr_el3 & SCR_EEL2) { | |
6189 | return CP_ACCESS_TRAP_EL2; | |
6190 | } | |
6191 | return CP_ACCESS_TRAP_EL3; | |
6192 | } | |
6193 | /* Accesses from EL1 NS and EL2 NS are UNDEF for write but allow reads. */ | |
6194 | if (isread) { | |
6195 | return CP_ACCESS_OK; | |
6196 | } | |
6197 | return CP_ACCESS_TRAP_UNCATEGORIZED; | |
6198 | } | |
6199 | ||
6200 | static const ARMCPRegInfo el3_cp_reginfo[] = { | |
6201 | { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64, | |
6202 | .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0, | |
6203 | .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3), | |
6204 | .resetfn = scr_reset, .writefn = scr_write }, | |
6205 | { .name = "SCR", .type = ARM_CP_ALIAS | ARM_CP_NEWEL, | |
6206 | .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0, | |
6207 | .access = PL1_RW, .accessfn = access_trap_aa32s_el1, | |
6208 | .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3), | |
6209 | .writefn = scr_write }, | |
6210 | { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64, | |
6211 | .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1, | |
6212 | .access = PL3_RW, .resetvalue = 0, | |
6213 | .fieldoffset = offsetof(CPUARMState, cp15.sder) }, | |
6214 | { .name = "SDER", | |
6215 | .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1, | |
6216 | .access = PL3_RW, .resetvalue = 0, | |
6217 | .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) }, | |
6218 | { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, | |
6219 | .access = PL1_RW, .accessfn = access_trap_aa32s_el1, | |
6220 | .writefn = vbar_write, .resetvalue = 0, | |
6221 | .fieldoffset = offsetof(CPUARMState, cp15.mvbar) }, | |
6222 | { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64, | |
6223 | .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0, | |
6224 | .access = PL3_RW, .resetvalue = 0, | |
6225 | .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) }, | |
6226 | { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64, | |
6227 | .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2, | |
6228 | .access = PL3_RW, | |
6229 | /* no .writefn needed as this can't cause an ASID change */ | |
6230 | .resetvalue = 0, | |
6231 | .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) }, | |
6232 | { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, | |
6233 | .type = ARM_CP_ALIAS, | |
6234 | .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, | |
6235 | .access = PL3_RW, | |
6236 | .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, | |
6237 | { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, | |
6238 | .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, | |
6239 | .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) }, | |
6240 | { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64, | |
6241 | .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0, | |
6242 | .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) }, | |
6243 | { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, | |
6244 | .type = ARM_CP_ALIAS, | |
6245 | .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, | |
6246 | .access = PL3_RW, | |
6247 | .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, | |
6248 | { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64, | |
6249 | .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, | |
6250 | .access = PL3_RW, .writefn = vbar_write, | |
6251 | .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]), | |
6252 | .resetvalue = 0 }, | |
6253 | { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64, | |
6254 | .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2, | |
6255 | .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0, | |
6256 | .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) }, | |
6257 | { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64, | |
6258 | .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2, | |
6259 | .access = PL3_RW, .resetvalue = 0, | |
6260 | .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) }, | |
6261 | { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64, | |
6262 | .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0, | |
6263 | .access = PL3_RW, .type = ARM_CP_CONST, | |
6264 | .resetvalue = 0 }, | |
6265 | { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH, | |
6266 | .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0, | |
6267 | .access = PL3_RW, .type = ARM_CP_CONST, | |
6268 | .resetvalue = 0 }, | |
6269 | { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH, | |
6270 | .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1, | |
6271 | .access = PL3_RW, .type = ARM_CP_CONST, | |
6272 | .resetvalue = 0 }, | |
6273 | { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64, | |
6274 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0, | |
6275 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
6276 | .writefn = tlbi_aa64_alle3is_write }, | |
6277 | { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64, | |
6278 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1, | |
6279 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
6280 | .writefn = tlbi_aa64_vae3is_write }, | |
6281 | { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64, | |
6282 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5, | |
6283 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
6284 | .writefn = tlbi_aa64_vae3is_write }, | |
6285 | { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64, | |
6286 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0, | |
6287 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
6288 | .writefn = tlbi_aa64_alle3_write }, | |
6289 | { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64, | |
6290 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1, | |
6291 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
6292 | .writefn = tlbi_aa64_vae3_write }, | |
6293 | { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64, | |
6294 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5, | |
6295 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
6296 | .writefn = tlbi_aa64_vae3_write }, | |
6297 | }; | |
6298 | ||
6299 | #ifndef CONFIG_USER_ONLY | |
6300 | /* Test if system register redirection is to occur in the current state. */ | |
6301 | static bool redirect_for_e2h(CPUARMState *env) | |
6302 | { | |
6303 | return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H); | |
6304 | } | |
6305 | ||
6306 | static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
6307 | { | |
6308 | CPReadFn *readfn; | |
6309 | ||
6310 | if (redirect_for_e2h(env)) { | |
6311 | /* Switch to the saved EL2 version of the register. */ | |
6312 | ri = ri->opaque; | |
6313 | readfn = ri->readfn; | |
6314 | } else { | |
6315 | readfn = ri->orig_readfn; | |
6316 | } | |
6317 | if (readfn == NULL) { | |
6318 | readfn = raw_read; | |
6319 | } | |
6320 | return readfn(env, ri); | |
6321 | } | |
6322 | ||
6323 | static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
6324 | uint64_t value) | |
6325 | { | |
6326 | CPWriteFn *writefn; | |
6327 | ||
6328 | if (redirect_for_e2h(env)) { | |
6329 | /* Switch to the saved EL2 version of the register. */ | |
6330 | ri = ri->opaque; | |
6331 | writefn = ri->writefn; | |
6332 | } else { | |
6333 | writefn = ri->orig_writefn; | |
6334 | } | |
6335 | if (writefn == NULL) { | |
6336 | writefn = raw_write; | |
6337 | } | |
6338 | writefn(env, ri, value); | |
6339 | } | |
6340 | ||
6341 | static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) | |
6342 | { | |
6343 | struct E2HAlias { | |
6344 | uint32_t src_key, dst_key, new_key; | |
6345 | const char *src_name, *dst_name, *new_name; | |
6346 | bool (*feature)(const ARMISARegisters *id); | |
6347 | }; | |
6348 | ||
6349 | #define K(op0, op1, crn, crm, op2) \ | |
6350 | ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) | |
6351 | ||
6352 | static const struct E2HAlias aliases[] = { | |
6353 | { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0), | |
6354 | "SCTLR", "SCTLR_EL2", "SCTLR_EL12" }, | |
6355 | { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2), | |
6356 | "CPACR", "CPTR_EL2", "CPACR_EL12" }, | |
6357 | { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0), | |
6358 | "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" }, | |
6359 | { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1), | |
6360 | "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" }, | |
6361 | { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2), | |
6362 | "TCR_EL1", "TCR_EL2", "TCR_EL12" }, | |
6363 | { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0), | |
6364 | "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" }, | |
6365 | { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1), | |
6366 | "ELR_EL1", "ELR_EL2", "ELR_EL12" }, | |
6367 | { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0), | |
6368 | "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" }, | |
6369 | { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1), | |
6370 | "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" }, | |
6371 | { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0), | |
6372 | "ESR_EL1", "ESR_EL2", "ESR_EL12" }, | |
6373 | { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0), | |
6374 | "FAR_EL1", "FAR_EL2", "FAR_EL12" }, | |
6375 | { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0), | |
6376 | "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" }, | |
6377 | { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0), | |
6378 | "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" }, | |
6379 | { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0), | |
6380 | "VBAR", "VBAR_EL2", "VBAR_EL12" }, | |
6381 | { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1), | |
6382 | "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" }, | |
6383 | { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0), | |
6384 | "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" }, | |
6385 | ||
6386 | /* | |
6387 | * Note that redirection of ZCR is mentioned in the description | |
6388 | * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but | |
6389 | * not in the summary table. | |
6390 | */ | |
6391 | { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0), | |
6392 | "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve }, | |
6393 | { K(3, 0, 1, 2, 6), K(3, 4, 1, 2, 6), K(3, 5, 1, 2, 6), | |
6394 | "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme }, | |
6395 | ||
6396 | { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0), | |
6397 | "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte }, | |
6398 | ||
6399 | { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7), | |
6400 | "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12", | |
6401 | isar_feature_aa64_scxtnum }, | |
6402 | ||
6403 | /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */ | |
6404 | /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */ | |
6405 | }; | |
6406 | #undef K | |
6407 | ||
6408 | size_t i; | |
6409 | ||
6410 | for (i = 0; i < ARRAY_SIZE(aliases); i++) { | |
6411 | const struct E2HAlias *a = &aliases[i]; | |
6412 | ARMCPRegInfo *src_reg, *dst_reg, *new_reg; | |
6413 | bool ok; | |
6414 | ||
6415 | if (a->feature && !a->feature(&cpu->isar)) { | |
6416 | continue; | |
6417 | } | |
6418 | ||
6419 | src_reg = g_hash_table_lookup(cpu->cp_regs, | |
6420 | (gpointer)(uintptr_t)a->src_key); | |
6421 | dst_reg = g_hash_table_lookup(cpu->cp_regs, | |
6422 | (gpointer)(uintptr_t)a->dst_key); | |
6423 | g_assert(src_reg != NULL); | |
6424 | g_assert(dst_reg != NULL); | |
6425 | ||
6426 | /* Cross-compare names to detect typos in the keys. */ | |
6427 | g_assert(strcmp(src_reg->name, a->src_name) == 0); | |
6428 | g_assert(strcmp(dst_reg->name, a->dst_name) == 0); | |
6429 | ||
6430 | /* None of the core system registers use opaque; we will. */ | |
6431 | g_assert(src_reg->opaque == NULL); | |
6432 | ||
6433 | /* Create alias before redirection so we dup the right data. */ | |
6434 | new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo)); | |
6435 | ||
6436 | new_reg->name = a->new_name; | |
6437 | new_reg->type |= ARM_CP_ALIAS; | |
6438 | /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */ | |
6439 | new_reg->access &= PL2_RW | PL3_RW; | |
6440 | ||
6441 | ok = g_hash_table_insert(cpu->cp_regs, | |
6442 | (gpointer)(uintptr_t)a->new_key, new_reg); | |
6443 | g_assert(ok); | |
6444 | ||
6445 | src_reg->opaque = dst_reg; | |
6446 | src_reg->orig_readfn = src_reg->readfn ?: raw_read; | |
6447 | src_reg->orig_writefn = src_reg->writefn ?: raw_write; | |
6448 | if (!src_reg->raw_readfn) { | |
6449 | src_reg->raw_readfn = raw_read; | |
6450 | } | |
6451 | if (!src_reg->raw_writefn) { | |
6452 | src_reg->raw_writefn = raw_write; | |
6453 | } | |
6454 | src_reg->readfn = el2_e2h_read; | |
6455 | src_reg->writefn = el2_e2h_write; | |
6456 | } | |
6457 | } | |
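/*
 * Sketch of the net effect for one alias, assuming FEAT_VHE is present:
 * after the loop above, an access to SCTLR_EL1 made at EL2 with
 * HCR_EL2.E2H == 1 goes through el2_e2h_read/el2_e2h_write and is
 * redirected to the SCTLR_EL2 reginfo, while the newly registered
 * SCTLR_EL12 encoding (an ARM_CP_ALIAS copy made before the
 * redirection) still reaches the real EL1 register.  Raw (migration)
 * accesses keep using raw_read/raw_write on the original field, so
 * the redirection never affects the saved state.
 */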
6458 | #endif | |
6459 | ||
6460 | static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, | |
6461 | bool isread) | |
6462 | { | |
6463 | int cur_el = arm_current_el(env); | |
6464 | ||
6465 | if (cur_el < 2) { | |
6466 | uint64_t hcr = arm_hcr_el2_eff(env); | |
6467 | ||
6468 | if (cur_el == 0) { | |
6469 | if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { | |
6470 | if (!(env->cp15.sctlr_el[2] & SCTLR_UCT)) { | |
6471 | return CP_ACCESS_TRAP_EL2; | |
6472 | } | |
6473 | } else { | |
6474 | if (!(env->cp15.sctlr_el[1] & SCTLR_UCT)) { | |
6475 | return CP_ACCESS_TRAP; | |
6476 | } | |
6477 | if (hcr & HCR_TID2) { | |
6478 | return CP_ACCESS_TRAP_EL2; | |
6479 | } | |
6480 | } | |
6481 | } else if (hcr & HCR_TID2) { | |
6482 | return CP_ACCESS_TRAP_EL2; | |
6483 | } | |
6484 | } | |
6485 | ||
6486 | if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) { | |
6487 | return CP_ACCESS_TRAP_EL2; | |
6488 | } | |
6489 | ||
6490 | return CP_ACCESS_OK; | |
6491 | } | |
6492 | ||
6493 | /* | |
6494 | * Check for traps to RAS registers, which are controlled | |
6495 | * by HCR_EL2.TERR and SCR_EL3.TERR. | |
6496 | */ | |
6497 | static CPAccessResult access_terr(CPUARMState *env, const ARMCPRegInfo *ri, | |
6498 | bool isread) | |
6499 | { | |
6500 | int el = arm_current_el(env); | |
6501 | ||
6502 | if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TERR)) { | |
6503 | return CP_ACCESS_TRAP_EL2; | |
6504 | } | |
6505 | if (el < 3 && (env->cp15.scr_el3 & SCR_TERR)) { | |
6506 | return CP_ACCESS_TRAP_EL3; | |
6507 | } | |
6508 | return CP_ACCESS_OK; | |
6509 | } | |
6510 | ||
6511 | static uint64_t disr_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
6512 | { | |
6513 | int el = arm_current_el(env); | |
6514 | ||
6515 | if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) { | |
6516 | return env->cp15.vdisr_el2; | |
6517 | } | |
6518 | if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) { | |
6519 | return 0; /* RAZ/WI */ | |
6520 | } | |
6521 | return env->cp15.disr_el1; | |
6522 | } | |
6523 | ||
6524 | static void disr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) | |
6525 | { | |
6526 | int el = arm_current_el(env); | |
6527 | ||
6528 | if (el < 2 && (arm_hcr_el2_eff(env) & HCR_AMO)) { | |
6529 | env->cp15.vdisr_el2 = val; | |
6530 | return; | |
6531 | } | |
6532 | if (el < 3 && (env->cp15.scr_el3 & SCR_EA)) { | |
6533 | return; /* RAZ/WI */ | |
6534 | } | |
6535 | env->cp15.disr_el1 = val; | |
6536 | } | |
6537 | ||
6538 | /* | |
6539 | * Minimal RAS implementation with no Error Records, | |
6540 | * which means that all of the Error Record registers: | |
6541 | * ERXADDR_EL1 | |
6542 | * ERXCTLR_EL1 | |
6543 | * ERXFR_EL1 | |
6544 | * ERXMISC0_EL1 | |
6545 | * ERXMISC1_EL1 | |
6546 | * ERXMISC2_EL1 | |
6547 | * ERXMISC3_EL1 | |
6548 | * ERXPFGCDN_EL1 (RASv1p1) | |
6549 | * ERXPFGCTL_EL1 (RASv1p1) | |
6550 | * ERXPFGF_EL1 (RASv1p1) | |
6551 | * ERXSTATUS_EL1 | |
6552 | * and | |
6553 | * ERRSELR_EL1 | |
6554 | * may generate UNDEFINED, which is the effect we get by not | |
6555 | * listing them at all. | |
6556 | * | |
6557 | * These registers have fine-grained trap bits, but UNDEF-to-EL1 | |
6558 | * is higher priority than FGT-to-EL2 so we do not need to list them | |
6559 | * in order to check for an FGT. | |
6560 | */ | |
6561 | static const ARMCPRegInfo minimal_ras_reginfo[] = { | |
6562 | { .name = "DISR_EL1", .state = ARM_CP_STATE_BOTH, | |
6563 | .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 1, | |
6564 | .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.disr_el1), | |
6565 | .readfn = disr_read, .writefn = disr_write, .raw_writefn = raw_write }, | |
6566 | { .name = "ERRIDR_EL1", .state = ARM_CP_STATE_BOTH, | |
6567 | .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 3, .opc2 = 0, | |
6568 | .access = PL1_R, .accessfn = access_terr, | |
6569 | .fgt = FGT_ERRIDR_EL1, | |
6570 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
6571 | { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH, | |
6572 | .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1, | |
6573 | .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) }, | |
6574 | { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH, | |
6575 | .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3, | |
6576 | .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) }, | |
6577 | }; | |
6578 | ||
6579 | /* | |
6580 | * Return the exception level to which exceptions should be taken | |
6581 | * via SVEAccessTrap. This excludes the check for whether the exception | |
6582 | * should be routed through AArch64.AdvSIMDFPAccessTrap. That can easily | |
6583 | * be found by testing 0 < fp_exception_el < sve_exception_el. | |
6584 | * | |
6585 | * C.f. the ARM pseudocode function CheckSVEEnabled. Note that the | |
6586 | * pseudocode does *not* separate out the FP trap checks, but has them | |
6587 | * all in one function. | |
6588 | */ | |
6589 | int sve_exception_el(CPUARMState *env, int el) | |
6590 | { | |
6591 | #ifndef CONFIG_USER_ONLY | |
6592 | if (el <= 1 && !el_is_in_host(env, el)) { | |
6593 | switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) { | |
6594 | case 1: | |
6595 | if (el != 0) { | |
6596 | break; | |
6597 | } | |
6598 | /* fall through */ | |
6599 | case 0: | |
6600 | case 2: | |
6601 | return 1; | |
6602 | } | |
6603 | } | |
6604 | ||
6605 | if (el <= 2 && arm_is_el2_enabled(env)) { | |
6606 | /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */ | |
6607 | if (env->cp15.hcr_el2 & HCR_E2H) { | |
6608 | switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) { | |
6609 | case 1: | |
6610 | if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) { | |
6611 | break; | |
6612 | } | |
6613 | /* fall through */ | |
6614 | case 0: | |
6615 | case 2: | |
6616 | return 2; | |
6617 | } | |
6618 | } else { | |
6619 | if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) { | |
6620 | return 2; | |
6621 | } | |
6622 | } | |
6623 | } | |
6624 | ||
6625 | /* CPTR_EL3. Since EZ is negative we must check for EL3. */ | |
6626 | if (arm_feature(env, ARM_FEATURE_EL3) | |
6627 | && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) { | |
6628 | return 3; | |
6629 | } | |
6630 | #endif | |
6631 | return 0; | |
6632 | } | |
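/*
 * For reference, the CPACR_EL1.ZEN encoding handled by the switch above
 * (CPTR_EL2.ZEN uses the same scheme when E2H is set):
 *   0b00, 0b10 : trap EL0 and EL1 accesses
 *   0b01       : trap EL0 accesses only
 *   0b11       : no trap from this control
 * so, for example, ZEN == 1 at EL1 falls out of the switch and the
 * function goes on to the CPTR_EL2 and CPTR_EL3 checks.
 */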
6633 | ||
6634 | /* | |
6635 | * Return the exception level to which exceptions should be taken for SME. | |
6636 | * C.f. the ARM pseudocode function CheckSMEAccess. | |
6637 | */ | |
6638 | int sme_exception_el(CPUARMState *env, int el) | |
6639 | { | |
6640 | #ifndef CONFIG_USER_ONLY | |
6641 | if (el <= 1 && !el_is_in_host(env, el)) { | |
6642 | switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, SMEN)) { | |
6643 | case 1: | |
6644 | if (el != 0) { | |
6645 | break; | |
6646 | } | |
6647 | /* fall through */ | |
6648 | case 0: | |
6649 | case 2: | |
6650 | return 1; | |
6651 | } | |
6652 | } | |
6653 | ||
6654 | if (el <= 2 && arm_is_el2_enabled(env)) { | |
6655 | /* CPTR_EL2 changes format with HCR_EL2.E2H (regardless of TGE). */ | |
6656 | if (env->cp15.hcr_el2 & HCR_E2H) { | |
6657 | switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, SMEN)) { | |
6658 | case 1: | |
6659 | if (el != 0 || !(env->cp15.hcr_el2 & HCR_TGE)) { | |
6660 | break; | |
6661 | } | |
6662 | /* fall through */ | |
6663 | case 0: | |
6664 | case 2: | |
6665 | return 2; | |
6666 | } | |
6667 | } else { | |
6668 | if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TSM)) { | |
6669 | return 2; | |
6670 | } | |
6671 | } | |
6672 | } | |
6673 | ||
6674 | /* CPTR_EL3. Since ESM is negative we must check for EL3. */ | |
6675 | if (arm_feature(env, ARM_FEATURE_EL3) | |
6676 | && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { | |
6677 | return 3; | |
6678 | } | |
6679 | #endif | |
6680 | return 0; | |
6681 | } | |
6682 | ||
6683 | /* | |
6684 | * Given that SVE is enabled, return the vector length for EL. | |
6685 | */ | |
6686 | uint32_t sve_vqm1_for_el_sm(CPUARMState *env, int el, bool sm) | |
6687 | { | |
6688 | ARMCPU *cpu = env_archcpu(env); | |
6689 | uint64_t *cr = env->vfp.zcr_el; | |
6690 | uint32_t map = cpu->sve_vq.map; | |
6691 | uint32_t len = ARM_MAX_VQ - 1; | |
6692 | ||
6693 | if (sm) { | |
6694 | cr = env->vfp.smcr_el; | |
6695 | map = cpu->sme_vq.map; | |
6696 | } | |
6697 | ||
6698 | if (el <= 1 && !el_is_in_host(env, el)) { | |
6699 | len = MIN(len, 0xf & (uint32_t)cr[1]); | |
6700 | } | |
6701 | if (el <= 2 && arm_feature(env, ARM_FEATURE_EL2)) { | |
6702 | len = MIN(len, 0xf & (uint32_t)cr[2]); | |
6703 | } | |
6704 | if (arm_feature(env, ARM_FEATURE_EL3)) { | |
6705 | len = MIN(len, 0xf & (uint32_t)cr[3]); | |
6706 | } | |
6707 | ||
6708 | map &= MAKE_64BIT_MASK(0, len + 1); | |
6709 | if (map != 0) { | |
6710 | return 31 - clz32(map); | |
6711 | } | |
6712 | ||
6713 | /* Bit 0 is always set for Normal SVE -- not so for Streaming SVE. */ | |
6714 | assert(sm); | |
6715 | return ctz32(cpu->sme_vq.map); | |
6716 | } | |
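/*
 * A worked example with hypothetical values: a CPU whose sve_vq.map has
 * bits 0, 1 and 3 set (VQ 1, 2 and 4 supported), running at EL1 with
 * ZCR_EL1.LEN == 2 and ZCR_EL2/ZCR_EL3 programmed to the maximum,
 * gives len == 2, map &= 0b0111 -> 0b0011, and the function returns
 * 31 - clz32(0b0011) == 1, i.e. VQ-1 for a 256-bit vector length:
 * the largest supported VQ that does not exceed the constraint.
 */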
6717 | ||
6718 | uint32_t sve_vqm1_for_el(CPUARMState *env, int el) | |
6719 | { | |
6720 | return sve_vqm1_for_el_sm(env, el, FIELD_EX64(env->svcr, SVCR, SM)); | |
6721 | } | |
6722 | ||
6723 | static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
6724 | uint64_t value) | |
6725 | { | |
6726 | int cur_el = arm_current_el(env); | |
6727 | int old_len = sve_vqm1_for_el(env, cur_el); | |
6728 | int new_len; | |
6729 | ||
6730 | /* Bits other than [3:0] are RAZ/WI. */ | |
6731 | QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16); | |
6732 | raw_write(env, ri, value & 0xf); | |
6733 | ||
6734 | /* | |
6735 | * Because we arrived here, we know both FP and SVE are enabled; | |
6736 | * otherwise we would have trapped access to the ZCR_ELn register. | |
6737 | */ | |
6738 | new_len = sve_vqm1_for_el(env, cur_el); | |
6739 | if (new_len < old_len) { | |
6740 | aarch64_sve_narrow_vq(env, new_len + 1); | |
6741 | } | |
6742 | } | |
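/*
 * For instance (hypothetical values): if the effective VQ-1 was 3 and
 * the write above lowers ZCR_ELx.LEN so that sve_vqm1_for_el() now
 * returns 1, aarch64_sve_narrow_vq(env, 2) zeroes the parts of the
 * Z, P and FFR registers beyond the new 256-bit length; widening the
 * vector length never modifies register contents here.
 */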
6743 | ||
6744 | static const ARMCPRegInfo zcr_reginfo[] = { | |
6745 | { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, | |
6746 | .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, | |
6747 | .access = PL1_RW, .type = ARM_CP_SVE, | |
6748 | .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), | |
6749 | .writefn = zcr_write, .raw_writefn = raw_write }, | |
6750 | { .name = "ZCR_EL2", .state = ARM_CP_STATE_AA64, | |
6751 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 0, | |
6752 | .access = PL2_RW, .type = ARM_CP_SVE, | |
6753 | .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[2]), | |
6754 | .writefn = zcr_write, .raw_writefn = raw_write }, | |
6755 | { .name = "ZCR_EL3", .state = ARM_CP_STATE_AA64, | |
6756 | .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 0, | |
6757 | .access = PL3_RW, .type = ARM_CP_SVE, | |
6758 | .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[3]), | |
6759 | .writefn = zcr_write, .raw_writefn = raw_write }, | |
6760 | }; | |
6761 | ||
6762 | #ifdef TARGET_AARCH64 | |
6763 | static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri, | |
6764 | bool isread) | |
6765 | { | |
6766 | int el = arm_current_el(env); | |
6767 | ||
6768 | if (el == 0) { | |
6769 | uint64_t sctlr = arm_sctlr(env, el); | |
6770 | if (!(sctlr & SCTLR_EnTP2)) { | |
6771 | return CP_ACCESS_TRAP; | |
6772 | } | |
6773 | } | |
6774 | /* TODO: FEAT_FGT */ | |
6775 | if (el < 3 | |
6776 | && arm_feature(env, ARM_FEATURE_EL3) | |
6777 | && !(env->cp15.scr_el3 & SCR_ENTP2)) { | |
6778 | return CP_ACCESS_TRAP_EL3; | |
6779 | } | |
6780 | return CP_ACCESS_OK; | |
6781 | } | |
6782 | ||
6783 | static CPAccessResult access_esm(CPUARMState *env, const ARMCPRegInfo *ri, | |
6784 | bool isread) | |
6785 | { | |
6786 | /* TODO: FEAT_FGT for SMPRI_EL1 but not SMPRIMAP_EL2 */ | |
6787 | if (arm_current_el(env) < 3 | |
6788 | && arm_feature(env, ARM_FEATURE_EL3) | |
6789 | && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { | |
6790 | return CP_ACCESS_TRAP_EL3; | |
6791 | } | |
6792 | return CP_ACCESS_OK; | |
6793 | } | |
6794 | ||
6795 | /* ResetSVEState */ | |
6796 | static void arm_reset_sve_state(CPUARMState *env) | |
6797 | { | |
6798 | memset(env->vfp.zregs, 0, sizeof(env->vfp.zregs)); | |
6799 | /* Recall that FFR is stored as pregs[16]. */ | |
6800 | memset(env->vfp.pregs, 0, sizeof(env->vfp.pregs)); | |
6801 | vfp_set_fpcr(env, 0x0800009f); | |
6802 | } | |
6803 | ||
6804 | void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask) | |
6805 | { | |
6806 | uint64_t change = (env->svcr ^ new) & mask; | |
6807 | ||
6808 | if (change == 0) { | |
6809 | return; | |
6810 | } | |
6811 | env->svcr ^= change; | |
6812 | ||
6813 | if (change & R_SVCR_SM_MASK) { | |
6814 | arm_reset_sve_state(env); | |
6815 | } | |
6816 | ||
6817 | /* | |
6818 | * ResetSMEState. | |
6819 | * | |
6820 | * SetPSTATE_ZA zeros on enable and disable. We can zero this only | |
6821 | * on enable: while disabled, the storage is inaccessible and the | |
6822 | * value does not matter. We're not saving the storage in vmstate | |
6823 | * when disabled either. | |
6824 | */ | |
6825 | if (change & new & R_SVCR_ZA_MASK) { | |
6826 | memset(env->zarray, 0, sizeof(env->zarray)); | |
6827 | } | |
6828 | ||
6829 | if (tcg_enabled()) { | |
6830 | arm_rebuild_hflags(env); | |
6831 | } | |
6832 | } | |
6833 | ||
6834 | static void svcr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
6835 | uint64_t value) | |
6836 | { | |
6837 | aarch64_set_svcr(env, value, -1); | |
6838 | } | |
6839 | ||
6840 | static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
6841 | uint64_t value) | |
6842 | { | |
6843 | int cur_el = arm_current_el(env); | |
6844 | int old_len = sve_vqm1_for_el(env, cur_el); | |
6845 | int new_len; | |
6846 | ||
6847 | QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1); | |
6848 | value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK; | |
6849 | raw_write(env, ri, value); | |
6850 | ||
6851 | /* | |
6852 | * Note that it is CONSTRAINED UNPREDICTABLE what happens to ZA storage | |
6853 | * when SVL is widened (old values kept, or zeros). Choose to keep the | |
6854 | * current values for simplicity. But for QEMU internals, we must still | |
6855 | * apply the narrower SVL to the Zregs and Pregs -- see the comment | |
6856 | * above aarch64_sve_narrow_vq. | |
6857 | */ | |
6858 | new_len = sve_vqm1_for_el(env, cur_el); | |
6859 | if (new_len < old_len) { | |
6860 | aarch64_sve_narrow_vq(env, new_len + 1); | |
6861 | } | |
6862 | } | |
6863 | ||
6864 | static const ARMCPRegInfo sme_reginfo[] = { | |
6865 | { .name = "TPIDR2_EL0", .state = ARM_CP_STATE_AA64, | |
6866 | .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 5, | |
6867 | .access = PL0_RW, .accessfn = access_tpidr2, | |
6868 | .fgt = FGT_NTPIDR2_EL0, | |
6869 | .fieldoffset = offsetof(CPUARMState, cp15.tpidr2_el0) }, | |
6870 | { .name = "SVCR", .state = ARM_CP_STATE_AA64, | |
6871 | .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 2, | |
6872 | .access = PL0_RW, .type = ARM_CP_SME, | |
6873 | .fieldoffset = offsetof(CPUARMState, svcr), | |
6874 | .writefn = svcr_write, .raw_writefn = raw_write }, | |
6875 | { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64, | |
6876 | .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6, | |
6877 | .access = PL1_RW, .type = ARM_CP_SME, | |
6878 | .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]), | |
6879 | .writefn = smcr_write, .raw_writefn = raw_write }, | |
6880 | { .name = "SMCR_EL2", .state = ARM_CP_STATE_AA64, | |
6881 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 6, | |
6882 | .access = PL2_RW, .type = ARM_CP_SME, | |
6883 | .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[2]), | |
6884 | .writefn = smcr_write, .raw_writefn = raw_write }, | |
6885 | { .name = "SMCR_EL3", .state = ARM_CP_STATE_AA64, | |
6886 | .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 2, .opc2 = 6, | |
6887 | .access = PL3_RW, .type = ARM_CP_SME, | |
6888 | .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[3]), | |
6889 | .writefn = smcr_write, .raw_writefn = raw_write }, | |
6890 | { .name = "SMIDR_EL1", .state = ARM_CP_STATE_AA64, | |
6891 | .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 6, | |
6892 | .access = PL1_R, .accessfn = access_aa64_tid1, | |
6893 | /* | |
6894 | * IMPLEMENTOR = 0 (software) | |
6895 | * REVISION = 0 (implementation defined) | |
6896 | * SMPS = 0 (no streaming execution priority in QEMU) | |
6897 | * AFFINITY = 0 (streaming SVE mode not shared with other PEs) | |
6898 | */ | |
6899 | .type = ARM_CP_CONST, .resetvalue = 0, }, | |
6900 | /* | |
6901 | * Because SMIDR_EL1.SMPS is 0, SMPRI_EL1 and SMPRIMAP_EL2 are RES 0. | |
6902 | */ | |
6903 | { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64, | |
6904 | .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4, | |
6905 | .access = PL1_RW, .accessfn = access_esm, | |
6906 | .fgt = FGT_NSMPRI_EL1, | |
6907 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
6908 | { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64, | |
6909 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5, | |
6910 | .access = PL2_RW, .accessfn = access_esm, | |
6911 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
6912 | }; | |
6913 | ||
6914 | static void tlbi_aa64_paall_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
6915 | uint64_t value) | |
6916 | { | |
6917 | CPUState *cs = env_cpu(env); | |
6918 | ||
6919 | tlb_flush(cs); | |
6920 | } | |
6921 | ||
6922 | static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
6923 | uint64_t value) | |
6924 | { | |
6925 | /* L0GPTSZ is RO; other bits not mentioned are RES0. */ | |
6926 | uint64_t rw_mask = R_GPCCR_PPS_MASK | R_GPCCR_IRGN_MASK | | |
6927 | R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK | | |
6928 | R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK; | |
6929 | ||
6930 | env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask); | |
6931 | } | |
6932 | ||
6933 | static void gpccr_reset(CPUARMState *env, const ARMCPRegInfo *ri) | |
6934 | { | |
6935 | env->cp15.gpccr_el3 = FIELD_DP64(0, GPCCR, L0GPTSZ, | |
6936 | env_archcpu(env)->reset_l0gptsz); | |
6937 | } | |
6938 | ||
6939 | static void tlbi_aa64_paallos_write(CPUARMState *env, const ARMCPRegInfo *ri, | |
6940 | uint64_t value) | |
6941 | { | |
6942 | CPUState *cs = env_cpu(env); | |
6943 | ||
6944 | tlb_flush_all_cpus_synced(cs); | |
6945 | } | |
6946 | ||
6947 | static const ARMCPRegInfo rme_reginfo[] = { | |
6948 | { .name = "GPCCR_EL3", .state = ARM_CP_STATE_AA64, | |
6949 | .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 6, | |
6950 | .access = PL3_RW, .writefn = gpccr_write, .resetfn = gpccr_reset, | |
6951 | .fieldoffset = offsetof(CPUARMState, cp15.gpccr_el3) }, | |
6952 | { .name = "GPTBR_EL3", .state = ARM_CP_STATE_AA64, | |
6953 | .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 1, .opc2 = 4, | |
6954 | .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.gptbr_el3) }, | |
6955 | { .name = "MFAR_EL3", .state = ARM_CP_STATE_AA64, | |
6956 | .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 5, | |
6957 | .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mfar_el3) }, | |
6958 | { .name = "TLBI_PAALL", .state = ARM_CP_STATE_AA64, | |
6959 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 4, | |
6960 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
6961 | .writefn = tlbi_aa64_paall_write }, | |
6962 | { .name = "TLBI_PAALLOS", .state = ARM_CP_STATE_AA64, | |
6963 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 4, | |
6964 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
6965 | .writefn = tlbi_aa64_paallos_write }, | |
6966 | /* | |
6967 | * QEMU does not have a way to invalidate by physical address, thus | |
6968 | * invalidating a range of physical addresses is accomplished by | |
6969 | * flushing all TLB entries in the outer shareable domain, | |
6970 | * just like PAALLOS. | |
6971 | */ | |
6972 | { .name = "TLBI_RPALOS", .state = ARM_CP_STATE_AA64, | |
6973 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 7, | |
6974 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
6975 | .writefn = tlbi_aa64_paallos_write }, | |
6976 | { .name = "TLBI_RPAOS", .state = ARM_CP_STATE_AA64, | |
6977 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 4, .opc2 = 3, | |
6978 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
6979 | .writefn = tlbi_aa64_paallos_write }, | |
6980 | { .name = "DC_CIPAPA", .state = ARM_CP_STATE_AA64, | |
6981 | .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 1, | |
6982 | .access = PL3_W, .type = ARM_CP_NOP }, | |
6983 | }; | |
6984 | ||
6985 | static const ARMCPRegInfo rme_mte_reginfo[] = { | |
6986 | { .name = "DC_CIGDPAPA", .state = ARM_CP_STATE_AA64, | |
6987 | .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 14, .opc2 = 5, | |
6988 | .access = PL3_W, .type = ARM_CP_NOP }, | |
6989 | }; | |
6990 | #endif /* TARGET_AARCH64 */ | |
6991 | ||
6992 | static void define_pmu_regs(ARMCPU *cpu) | |
6993 | { | |
6994 | /* | |
6995 | * v7 performance monitor control register: same implementor | |
6996 | * field as main ID register, and we implement four counters in | |
6997 | * addition to the cycle count register. | |
6998 | */ | |
6999 | unsigned int i, pmcrn = pmu_num_counters(&cpu->env); | |
7000 | ARMCPRegInfo pmcr = { | |
7001 | .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, | |
7002 | .access = PL0_RW, | |
7003 | .fgt = FGT_PMCR_EL0, | |
7004 | .type = ARM_CP_IO | ARM_CP_ALIAS, | |
7005 | .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), | |
7006 | .accessfn = pmreg_access, .writefn = pmcr_write, | |
7007 | .raw_writefn = raw_write, | |
7008 | }; | |
7009 | ARMCPRegInfo pmcr64 = { | |
7010 | .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, | |
7011 | .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, | |
7012 | .access = PL0_RW, .accessfn = pmreg_access, | |
7013 | .fgt = FGT_PMCR_EL0, | |
7014 | .type = ARM_CP_IO, | |
7015 | .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), | |
7016 | .resetvalue = cpu->isar.reset_pmcr_el0, | |
7017 | .writefn = pmcr_write, .raw_writefn = raw_write, | |
7018 | }; | |
7019 | ||
7020 | define_one_arm_cp_reg(cpu, &pmcr); | |
7021 | define_one_arm_cp_reg(cpu, &pmcr64); | |
7022 | for (i = 0; i < pmcrn; i++) { | |
7023 | char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); | |
7024 | char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); | |
7025 | char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); | |
7026 | char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); | |
7027 | ARMCPRegInfo pmev_regs[] = { | |
7028 | { .name = pmevcntr_name, .cp = 15, .crn = 14, | |
7029 | .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, | |
7030 | .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, | |
7031 | .fgt = FGT_PMEVCNTRN_EL0, | |
7032 | .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, | |
7033 | .accessfn = pmreg_access_xevcntr }, | |
7034 | { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, | |
7035 | .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), | |
7036 | .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr, | |
7037 | .type = ARM_CP_IO, | |
7038 | .fgt = FGT_PMEVCNTRN_EL0, | |
7039 | .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, | |
7040 | .raw_readfn = pmevcntr_rawread, | |
7041 | .raw_writefn = pmevcntr_rawwrite }, | |
7042 | { .name = pmevtyper_name, .cp = 15, .crn = 14, | |
7043 | .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, | |
7044 | .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, | |
7045 | .fgt = FGT_PMEVTYPERN_EL0, | |
7046 | .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, | |
7047 | .accessfn = pmreg_access }, | |
7048 | { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, | |
7049 | .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), | |
7050 | .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, | |
7051 | .fgt = FGT_PMEVTYPERN_EL0, | |
7052 | .type = ARM_CP_IO, | |
7053 | .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, | |
7054 | .raw_writefn = pmevtyper_rawwrite }, | |
7055 | }; | |
7056 | define_arm_cp_regs(cpu, pmev_regs); | |
7057 | g_free(pmevcntr_name); | |
7058 | g_free(pmevcntr_el0_name); | |
7059 | g_free(pmevtyper_name); | |
7060 | g_free(pmevtyper_el0_name); | |
7061 | } | |
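/*
 * Encoding note: the PMEVCNTR<n> and PMEVTYPER<n> definitions above follow
 * the architectural numbering, in which n[4:3] selects CRm and n[2:0]
 * selects opc2.  For example, PMEVCNTR13_EL0 encodes as
 * crm = 8 | (3 & (13 >> 3)) = 9, opc2 = 13 & 7 = 5, while PMEVTYPER13_EL0
 * uses the CRm block starting at 12, giving crm = 13, opc2 = 5.
 */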
7062 | if (cpu_isar_feature(aa32_pmuv3p1, cpu)) { | |
7063 | ARMCPRegInfo v81_pmu_regs[] = { | |
7064 | { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, | |
7065 | .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, | |
7066 | .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, | |
7067 | .fgt = FGT_PMCEIDN_EL0, | |
7068 | .resetvalue = extract64(cpu->pmceid0, 32, 32) }, | |
7069 | { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, | |
7070 | .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, | |
7071 | .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, | |
7072 | .fgt = FGT_PMCEIDN_EL0, | |
7073 | .resetvalue = extract64(cpu->pmceid1, 32, 32) }, | |
7074 | }; | |
7075 | define_arm_cp_regs(cpu, v81_pmu_regs); | |
7076 | } | |
7077 | if (cpu_isar_feature(any_pmuv3p4, cpu)) { | |
7078 | static const ARMCPRegInfo v84_pmmir = { | |
7079 | .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, | |
7080 | .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, | |
7081 | .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, | |
7082 | .fgt = FGT_PMMIR_EL1, | |
7083 | .resetvalue = 0 | |
7084 | }; | |
7085 | define_one_arm_cp_reg(cpu, &v84_pmmir); | |
7086 | } | |
7087 | } | |
7088 | ||
7089 | #ifndef CONFIG_USER_ONLY | |
7090 | /* | |
7091 | * We don't know until after realize whether there's a GICv3 | |
7092 | * attached, and that is what registers the gicv3 sysregs. | |
7093 |  * So we have to fill in the GIC fields in ID_PFR1/ID_PFR1_EL1/ID_AA64PFR0_EL1 | |
7094 | * at runtime. | |
7095 | */ | |
7096 | static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
7097 | { | |
7098 | ARMCPU *cpu = env_archcpu(env); | |
7099 | uint64_t pfr1 = cpu->isar.id_pfr1; | |
7100 | ||
7101 | if (env->gicv3state) { | |
7102 | pfr1 |= 1 << 28; | |
7103 | } | |
7104 | return pfr1; | |
7105 | } | |
7106 | ||
7107 | static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
7108 | { | |
7109 | ARMCPU *cpu = env_archcpu(env); | |
7110 | uint64_t pfr0 = cpu->isar.id_aa64pfr0; | |
7111 | ||
7112 | if (env->gicv3state) { | |
7113 | pfr0 |= 1 << 24; | |
7114 | } | |
7115 | return pfr0; | |
7116 | } | |
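/*
 * In both helpers the OR'd-in constant is the GIC field of the respective
 * ID register: ID_PFR1.GIC occupies bits [31:28] and ID_AA64PFR0_EL1.GIC
 * occupies bits [27:24], so "1 << 28" and "1 << 24" set the field to 1,
 * advertising a GICv3 system register CPU interface once a GICv3 has
 * attached its state to the CPU.
 */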
7117 | #endif | |
7118 | ||
7119 | /* | |
7120 | * Shared logic between LORID and the rest of the LOR* registers. | |
7121 | * Secure state exclusion has already been dealt with. | |
7122 | */ | |
7123 | static CPAccessResult access_lor_ns(CPUARMState *env, | |
7124 | const ARMCPRegInfo *ri, bool isread) | |
7125 | { | |
7126 | int el = arm_current_el(env); | |
7127 | ||
7128 | if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) { | |
7129 | return CP_ACCESS_TRAP_EL2; | |
7130 | } | |
7131 | if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) { | |
7132 | return CP_ACCESS_TRAP_EL3; | |
7133 | } | |
7134 | return CP_ACCESS_OK; | |
7135 | } | |
7136 | ||
7137 | static CPAccessResult access_lor_other(CPUARMState *env, | |
7138 | const ARMCPRegInfo *ri, bool isread) | |
7139 | { | |
7140 | if (arm_is_secure_below_el3(env)) { | |
7141 | /* Access denied in secure mode. */ | |
7142 | return CP_ACCESS_TRAP; | |
7143 | } | |
7144 | return access_lor_ns(env, ri, isread); | |
7145 | } | |
7146 | ||
7147 | /* | |
7148 | * A trivial implementation of ARMv8.1-LOR leaves all of these | |
7149 | * registers fixed at 0, which indicates that there are zero | |
7150 | * supported Limited Ordering regions. | |
7151 | */ | |
7152 | static const ARMCPRegInfo lor_reginfo[] = { | |
7153 | { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64, | |
7154 | .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0, | |
7155 | .access = PL1_RW, .accessfn = access_lor_other, | |
7156 | .fgt = FGT_LORSA_EL1, | |
7157 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
7158 | { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64, | |
7159 | .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1, | |
7160 | .access = PL1_RW, .accessfn = access_lor_other, | |
7161 | .fgt = FGT_LOREA_EL1, | |
7162 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
7163 | { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64, | |
7164 | .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2, | |
7165 | .access = PL1_RW, .accessfn = access_lor_other, | |
7166 | .fgt = FGT_LORN_EL1, | |
7167 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
7168 | { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64, | |
7169 | .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3, | |
7170 | .access = PL1_RW, .accessfn = access_lor_other, | |
7171 | .fgt = FGT_LORC_EL1, | |
7172 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
7173 | { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64, | |
7174 | .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7, | |
7175 | .access = PL1_R, .accessfn = access_lor_ns, | |
7176 | .fgt = FGT_LORID_EL1, | |
7177 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
7178 | }; | |
7179 | ||
7180 | #ifdef TARGET_AARCH64 | |
7181 | static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri, | |
7182 | bool isread) | |
7183 | { | |
7184 | int el = arm_current_el(env); | |
7185 | ||
7186 | if (el < 2 && | |
7187 | arm_is_el2_enabled(env) && | |
7188 | !(arm_hcr_el2_eff(env) & HCR_APK)) { | |
7189 | return CP_ACCESS_TRAP_EL2; | |
7190 | } | |
7191 | if (el < 3 && | |
7192 | arm_feature(env, ARM_FEATURE_EL3) && | |
7193 | !(env->cp15.scr_el3 & SCR_APK)) { | |
7194 | return CP_ACCESS_TRAP_EL3; | |
7195 | } | |
7196 | return CP_ACCESS_OK; | |
7197 | } | |
7198 | ||
7199 | static const ARMCPRegInfo pauth_reginfo[] = { | |
7200 | { .name = "APDAKEYLO_EL1", .state = ARM_CP_STATE_AA64, | |
7201 | .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 0, | |
7202 | .access = PL1_RW, .accessfn = access_pauth, | |
7203 | .fgt = FGT_APDAKEY, | |
7204 | .fieldoffset = offsetof(CPUARMState, keys.apda.lo) }, | |
7205 | { .name = "APDAKEYHI_EL1", .state = ARM_CP_STATE_AA64, | |
7206 | .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 1, | |
7207 | .access = PL1_RW, .accessfn = access_pauth, | |
7208 | .fgt = FGT_APDAKEY, | |
7209 | .fieldoffset = offsetof(CPUARMState, keys.apda.hi) }, | |
7210 | { .name = "APDBKEYLO_EL1", .state = ARM_CP_STATE_AA64, | |
7211 | .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 2, | |
7212 | .access = PL1_RW, .accessfn = access_pauth, | |
7213 | .fgt = FGT_APDBKEY, | |
7214 | .fieldoffset = offsetof(CPUARMState, keys.apdb.lo) }, | |
7215 | { .name = "APDBKEYHI_EL1", .state = ARM_CP_STATE_AA64, | |
7216 | .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 2, .opc2 = 3, | |
7217 | .access = PL1_RW, .accessfn = access_pauth, | |
7218 | .fgt = FGT_APDBKEY, | |
7219 | .fieldoffset = offsetof(CPUARMState, keys.apdb.hi) }, | |
7220 | { .name = "APGAKEYLO_EL1", .state = ARM_CP_STATE_AA64, | |
7221 | .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 0, | |
7222 | .access = PL1_RW, .accessfn = access_pauth, | |
7223 | .fgt = FGT_APGAKEY, | |
7224 | .fieldoffset = offsetof(CPUARMState, keys.apga.lo) }, | |
7225 | { .name = "APGAKEYHI_EL1", .state = ARM_CP_STATE_AA64, | |
7226 | .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 3, .opc2 = 1, | |
7227 | .access = PL1_RW, .accessfn = access_pauth, | |
7228 | .fgt = FGT_APGAKEY, | |
7229 | .fieldoffset = offsetof(CPUARMState, keys.apga.hi) }, | |
7230 | { .name = "APIAKEYLO_EL1", .state = ARM_CP_STATE_AA64, | |
7231 | .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 0, | |
7232 | .access = PL1_RW, .accessfn = access_pauth, | |
7233 | .fgt = FGT_APIAKEY, | |
7234 | .fieldoffset = offsetof(CPUARMState, keys.apia.lo) }, | |
7235 | { .name = "APIAKEYHI_EL1", .state = ARM_CP_STATE_AA64, | |
7236 | .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 1, | |
7237 | .access = PL1_RW, .accessfn = access_pauth, | |
7238 | .fgt = FGT_APIAKEY, | |
7239 | .fieldoffset = offsetof(CPUARMState, keys.apia.hi) }, | |
7240 | { .name = "APIBKEYLO_EL1", .state = ARM_CP_STATE_AA64, | |
7241 | .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 2, | |
7242 | .access = PL1_RW, .accessfn = access_pauth, | |
7243 | .fgt = FGT_APIBKEY, | |
7244 | .fieldoffset = offsetof(CPUARMState, keys.apib.lo) }, | |
7245 | { .name = "APIBKEYHI_EL1", .state = ARM_CP_STATE_AA64, | |
7246 | .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 1, .opc2 = 3, | |
7247 | .access = PL1_RW, .accessfn = access_pauth, | |
7248 | .fgt = FGT_APIBKEY, | |
7249 | .fieldoffset = offsetof(CPUARMState, keys.apib.hi) }, | |
7250 | }; | |
7251 | ||
7252 | static const ARMCPRegInfo tlbirange_reginfo[] = { | |
7253 | { .name = "TLBI_RVAE1IS", .state = ARM_CP_STATE_AA64, | |
7254 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 1, | |
7255 | .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, | |
7256 | .fgt = FGT_TLBIRVAE1IS, | |
7257 | .writefn = tlbi_aa64_rvae1is_write }, | |
7258 | { .name = "TLBI_RVAAE1IS", .state = ARM_CP_STATE_AA64, | |
7259 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 3, | |
7260 | .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, | |
7261 | .fgt = FGT_TLBIRVAAE1IS, | |
7262 | .writefn = tlbi_aa64_rvae1is_write }, | |
7263 | { .name = "TLBI_RVALE1IS", .state = ARM_CP_STATE_AA64, | |
7264 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 5, | |
7265 | .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, | |
7266 | .fgt = FGT_TLBIRVALE1IS, | |
7267 | .writefn = tlbi_aa64_rvae1is_write }, | |
7268 | { .name = "TLBI_RVAALE1IS", .state = ARM_CP_STATE_AA64, | |
7269 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 2, .opc2 = 7, | |
7270 | .access = PL1_W, .accessfn = access_ttlbis, .type = ARM_CP_NO_RAW, | |
7271 | .fgt = FGT_TLBIRVAALE1IS, | |
7272 | .writefn = tlbi_aa64_rvae1is_write }, | |
7273 | { .name = "TLBI_RVAE1OS", .state = ARM_CP_STATE_AA64, | |
7274 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1, | |
7275 | .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, | |
7276 | .fgt = FGT_TLBIRVAE1OS, | |
7277 | .writefn = tlbi_aa64_rvae1is_write }, | |
7278 | { .name = "TLBI_RVAAE1OS", .state = ARM_CP_STATE_AA64, | |
7279 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 3, | |
7280 | .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, | |
7281 | .fgt = FGT_TLBIRVAAE1OS, | |
7282 | .writefn = tlbi_aa64_rvae1is_write }, | |
7283 | { .name = "TLBI_RVALE1OS", .state = ARM_CP_STATE_AA64, | |
7284 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 5, | |
7285 | .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, | |
7286 | .fgt = FGT_TLBIRVALE1OS, | |
7287 | .writefn = tlbi_aa64_rvae1is_write }, | |
7288 | { .name = "TLBI_RVAALE1OS", .state = ARM_CP_STATE_AA64, | |
7289 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 7, | |
7290 | .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, | |
7291 | .fgt = FGT_TLBIRVAALE1OS, | |
7292 | .writefn = tlbi_aa64_rvae1is_write }, | |
7293 | { .name = "TLBI_RVAE1", .state = ARM_CP_STATE_AA64, | |
7294 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1, | |
7295 | .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, | |
7296 | .fgt = FGT_TLBIRVAE1, | |
7297 | .writefn = tlbi_aa64_rvae1_write }, | |
7298 | { .name = "TLBI_RVAAE1", .state = ARM_CP_STATE_AA64, | |
7299 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 3, | |
7300 | .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, | |
7301 | .fgt = FGT_TLBIRVAAE1, | |
7302 | .writefn = tlbi_aa64_rvae1_write }, | |
7303 | { .name = "TLBI_RVALE1", .state = ARM_CP_STATE_AA64, | |
7304 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 5, | |
7305 | .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, | |
7306 | .fgt = FGT_TLBIRVALE1, | |
7307 | .writefn = tlbi_aa64_rvae1_write }, | |
7308 | { .name = "TLBI_RVAALE1", .state = ARM_CP_STATE_AA64, | |
7309 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 7, | |
7310 | .access = PL1_W, .accessfn = access_ttlb, .type = ARM_CP_NO_RAW, | |
7311 | .fgt = FGT_TLBIRVAALE1, | |
7312 | .writefn = tlbi_aa64_rvae1_write }, | |
7313 | { .name = "TLBI_RIPAS2E1IS", .state = ARM_CP_STATE_AA64, | |
7314 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 2, | |
7315 | .access = PL2_W, .type = ARM_CP_NO_RAW, | |
7316 | .writefn = tlbi_aa64_ripas2e1is_write }, | |
7317 | { .name = "TLBI_RIPAS2LE1IS", .state = ARM_CP_STATE_AA64, | |
7318 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 6, | |
7319 | .access = PL2_W, .type = ARM_CP_NO_RAW, | |
7320 | .writefn = tlbi_aa64_ripas2e1is_write }, | |
7321 | { .name = "TLBI_RVAE2IS", .state = ARM_CP_STATE_AA64, | |
7322 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 1, | |
7323 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
7324 | .writefn = tlbi_aa64_rvae2is_write }, | |
7325 | { .name = "TLBI_RVALE2IS", .state = ARM_CP_STATE_AA64, | |
7326 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 2, .opc2 = 5, | |
7327 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
7328 | .writefn = tlbi_aa64_rvae2is_write }, | |
7329 | { .name = "TLBI_RIPAS2E1", .state = ARM_CP_STATE_AA64, | |
7330 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 2, | |
7331 | .access = PL2_W, .type = ARM_CP_NO_RAW, | |
7332 | .writefn = tlbi_aa64_ripas2e1_write }, | |
7333 | { .name = "TLBI_RIPAS2LE1", .state = ARM_CP_STATE_AA64, | |
7334 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 6, | |
7335 | .access = PL2_W, .type = ARM_CP_NO_RAW, | |
7336 | .writefn = tlbi_aa64_ripas2e1_write }, | |
7337 | { .name = "TLBI_RVAE2OS", .state = ARM_CP_STATE_AA64, | |
7338 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 1, | |
7339 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
7340 | .writefn = tlbi_aa64_rvae2is_write }, | |
7341 | { .name = "TLBI_RVALE2OS", .state = ARM_CP_STATE_AA64, | |
7342 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 5, .opc2 = 5, | |
7343 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
7344 | .writefn = tlbi_aa64_rvae2is_write }, | |
7345 | { .name = "TLBI_RVAE2", .state = ARM_CP_STATE_AA64, | |
7346 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 1, | |
7347 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
7348 | .writefn = tlbi_aa64_rvae2_write }, | |
7349 | { .name = "TLBI_RVALE2", .state = ARM_CP_STATE_AA64, | |
7350 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 6, .opc2 = 5, | |
7351 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
7352 | .writefn = tlbi_aa64_rvae2_write }, | |
7353 | { .name = "TLBI_RVAE3IS", .state = ARM_CP_STATE_AA64, | |
7354 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 1, | |
7355 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
7356 | .writefn = tlbi_aa64_rvae3is_write }, | |
7357 | { .name = "TLBI_RVALE3IS", .state = ARM_CP_STATE_AA64, | |
7358 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 2, .opc2 = 5, | |
7359 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
7360 | .writefn = tlbi_aa64_rvae3is_write }, | |
7361 | { .name = "TLBI_RVAE3OS", .state = ARM_CP_STATE_AA64, | |
7362 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 1, | |
7363 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
7364 | .writefn = tlbi_aa64_rvae3is_write }, | |
7365 | { .name = "TLBI_RVALE3OS", .state = ARM_CP_STATE_AA64, | |
7366 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 5, .opc2 = 5, | |
7367 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
7368 | .writefn = tlbi_aa64_rvae3is_write }, | |
7369 | { .name = "TLBI_RVAE3", .state = ARM_CP_STATE_AA64, | |
7370 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 1, | |
7371 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
7372 | .writefn = tlbi_aa64_rvae3_write }, | |
7373 | { .name = "TLBI_RVALE3", .state = ARM_CP_STATE_AA64, | |
7374 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 6, .opc2 = 5, | |
7375 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
7376 | .writefn = tlbi_aa64_rvae3_write }, | |
7377 | }; | |
7378 | ||
7379 | static const ARMCPRegInfo tlbios_reginfo[] = { | |
7380 | { .name = "TLBI_VMALLE1OS", .state = ARM_CP_STATE_AA64, | |
7381 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0, | |
7382 | .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, | |
7383 | .fgt = FGT_TLBIVMALLE1OS, | |
7384 | .writefn = tlbi_aa64_vmalle1is_write }, | |
7385 | { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64, | |
7386 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1, | |
7387 | .fgt = FGT_TLBIVAE1OS, | |
7388 | .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, | |
7389 | .writefn = tlbi_aa64_vae1is_write }, | |
7390 | { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64, | |
7391 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2, | |
7392 | .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, | |
7393 | .fgt = FGT_TLBIASIDE1OS, | |
7394 | .writefn = tlbi_aa64_vmalle1is_write }, | |
7395 | { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64, | |
7396 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3, | |
7397 | .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, | |
7398 | .fgt = FGT_TLBIVAAE1OS, | |
7399 | .writefn = tlbi_aa64_vae1is_write }, | |
7400 | { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64, | |
7401 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5, | |
7402 | .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, | |
7403 | .fgt = FGT_TLBIVALE1OS, | |
7404 | .writefn = tlbi_aa64_vae1is_write }, | |
7405 | { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64, | |
7406 | .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7, | |
7407 | .access = PL1_W, .accessfn = access_ttlbos, .type = ARM_CP_NO_RAW, | |
7408 | .fgt = FGT_TLBIVAALE1OS, | |
7409 | .writefn = tlbi_aa64_vae1is_write }, | |
7410 | { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64, | |
7411 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0, | |
7412 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
7413 | .writefn = tlbi_aa64_alle2is_write }, | |
7414 | { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64, | |
7415 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1, | |
7416 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
7417 | .writefn = tlbi_aa64_vae2is_write }, | |
7418 | { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64, | |
7419 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4, | |
7420 | .access = PL2_W, .type = ARM_CP_NO_RAW, | |
7421 | .writefn = tlbi_aa64_alle1is_write }, | |
7422 | { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64, | |
7423 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5, | |
7424 | .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_EL3_NO_EL2_UNDEF, | |
7425 | .writefn = tlbi_aa64_vae2is_write }, | |
7426 | { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64, | |
7427 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6, | |
7428 | .access = PL2_W, .type = ARM_CP_NO_RAW, | |
7429 | .writefn = tlbi_aa64_alle1is_write }, | |
7430 | { .name = "TLBI_IPAS2E1OS", .state = ARM_CP_STATE_AA64, | |
7431 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 0, | |
7432 | .access = PL2_W, .type = ARM_CP_NOP }, | |
7433 | { .name = "TLBI_RIPAS2E1OS", .state = ARM_CP_STATE_AA64, | |
7434 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 3, | |
7435 | .access = PL2_W, .type = ARM_CP_NOP }, | |
7436 | { .name = "TLBI_IPAS2LE1OS", .state = ARM_CP_STATE_AA64, | |
7437 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 4, | |
7438 | .access = PL2_W, .type = ARM_CP_NOP }, | |
7439 | { .name = "TLBI_RIPAS2LE1OS", .state = ARM_CP_STATE_AA64, | |
7440 | .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 7, | |
7441 | .access = PL2_W, .type = ARM_CP_NOP }, | |
7442 | { .name = "TLBI_ALLE3OS", .state = ARM_CP_STATE_AA64, | |
7443 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0, | |
7444 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
7445 | .writefn = tlbi_aa64_alle3is_write }, | |
7446 | { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64, | |
7447 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1, | |
7448 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
7449 | .writefn = tlbi_aa64_vae3is_write }, | |
7450 | { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64, | |
7451 | .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5, | |
7452 | .access = PL3_W, .type = ARM_CP_NO_RAW, | |
7453 | .writefn = tlbi_aa64_vae3is_write }, | |
7454 | }; | |
7455 | ||
7456 | static uint64_t rndr_readfn(CPUARMState *env, const ARMCPRegInfo *ri) | |
7457 | { | |
7458 | Error *err = NULL; | |
7459 | uint64_t ret; | |
7460 | ||
7461 | /* Success sets NZCV = 0000. */ | |
7462 | env->NF = env->CF = env->VF = 0, env->ZF = 1; | |
7463 | ||
7464 | if (qemu_guest_getrandom(&ret, sizeof(ret), &err) < 0) { | |
7465 | /* | |
7466 | * ??? Failed, for unknown reasons in the crypto subsystem. | |
7467 | * The best we can do is log the reason and return the | |
7468 | * timed-out indication to the guest. There is no reason | |
7469 | * we know to expect this failure to be transitory, so the | |
7470 | * guest may well hang retrying the operation. | |
7471 | */ | |
7472 | qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s", | |
7473 | ri->name, error_get_pretty(err)); | |
7474 | error_free(err); | |
7475 | ||
7476 | env->ZF = 0; /* NZCV = 0100 */ | |
7477 | return 0; | |
7478 | } | |
7479 | return ret; | |
7480 | } | |
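/*
 * Flag representation reminder: QEMU keeps Z inverted (the Z flag reads as
 * set only when env->ZF == 0), N and V in bit 31 of env->NF/env->VF, and C
 * directly in env->CF.  So the assignments above yield NZCV = 0000 on
 * success, while forcing env->ZF = 0 on failure yields NZCV = 0100, which
 * the architecture uses to signal that no valid random number was returned.
 */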
7481 | ||
7482 | /* We do not support re-seeding, so the two registers operate the same. */ | |
7483 | static const ARMCPRegInfo rndr_reginfo[] = { | |
7484 | { .name = "RNDR", .state = ARM_CP_STATE_AA64, | |
7485 | .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, | |
7486 | .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 0, | |
7487 | .access = PL0_R, .readfn = rndr_readfn }, | |
7488 | { .name = "RNDRRS", .state = ARM_CP_STATE_AA64, | |
7489 | .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END | ARM_CP_IO, | |
7490 | .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 4, .opc2 = 1, | |
7491 | .access = PL0_R, .readfn = rndr_readfn }, | |
7492 | }; | |
7493 | ||
7494 | static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, | |
7495 | uint64_t value) | |
7496 | { | |
7497 | ARMCPU *cpu = env_archcpu(env); | |
7498 | /* CTR_EL0 System register -> DminLine, bits [19:16] */ | |
7499 | uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF); | |
7500 | uint64_t vaddr_in = (uint64_t) value; | |
7501 | uint64_t vaddr = vaddr_in & ~(dline_size - 1); | |
7502 | void *haddr; | |
7503 | int mem_idx = cpu_mmu_index(env, false); | |
7504 | ||
7505 | /* This won't be crossing page boundaries */ | |
7506 | haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC()); | |
7507 | if (haddr) { | |
7508 | #ifndef CONFIG_USER_ONLY | |
7509 | ||
7510 | ram_addr_t offset; | |
7511 | MemoryRegion *mr; | |
7512 | ||
7513 | /* RCU lock is already being held */ | |
7514 | mr = memory_region_from_host(haddr, &offset); | |
7515 | ||
7516 | if (mr) { | |
7517 | memory_region_writeback(mr, offset, dline_size); | |
7518 | } | |
7519 | #endif /* CONFIG_USER_ONLY */ | |
7520 | } | |
7521 | } | |
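/*
 * Worked example of the line-size arithmetic above: a CTR_EL0.DminLine of 4
 * (log2 of the line length in 4-byte words) gives
 * dline_size = 4 << 4 = 64 bytes, so a DC CVAP of address 0x1234 is rounded
 * down to vaddr = 0x1200 and writes back one 64-byte line.
 */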
7522 | ||
7523 | static const ARMCPRegInfo dcpop_reg[] = { | |
7524 | { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64, | |
7525 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1, | |
7526 | .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, | |
7527 | .fgt = FGT_DCCVAP, | |
7528 | .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, | |
7529 | }; | |
7530 | ||
7531 | static const ARMCPRegInfo dcpodp_reg[] = { | |
7532 | { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64, | |
7533 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1, | |
7534 | .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END, | |
7535 | .fgt = FGT_DCCVADP, | |
7536 | .accessfn = aa64_cacheop_poc_access, .writefn = dccvap_writefn }, | |
7537 | }; | |
7538 | ||
7539 | static CPAccessResult access_aa64_tid5(CPUARMState *env, const ARMCPRegInfo *ri, | |
7540 | bool isread) | |
7541 | { | |
7542 | if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID5)) { | |
7543 | return CP_ACCESS_TRAP_EL2; | |
7544 | } | |
7545 | ||
7546 | return CP_ACCESS_OK; | |
7547 | } | |
7548 | ||
7549 | static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri, | |
7550 | bool isread) | |
7551 | { | |
7552 | int el = arm_current_el(env); | |
7553 | ||
7554 | if (el < 2 && arm_is_el2_enabled(env)) { | |
7555 | uint64_t hcr = arm_hcr_el2_eff(env); | |
7556 | if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) { | |
7557 | return CP_ACCESS_TRAP_EL2; | |
7558 | } | |
7559 | } | |
7560 | if (el < 3 && | |
7561 | arm_feature(env, ARM_FEATURE_EL3) && | |
7562 | !(env->cp15.scr_el3 & SCR_ATA)) { | |
7563 | return CP_ACCESS_TRAP_EL3; | |
7564 | } | |
7565 | return CP_ACCESS_OK; | |
7566 | } | |
7567 | ||
7568 | static uint64_t tco_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
7569 | { | |
7570 | return env->pstate & PSTATE_TCO; | |
7571 | } | |
7572 | ||
7573 | static void tco_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val) | |
7574 | { | |
7575 | env->pstate = (env->pstate & ~PSTATE_TCO) | (val & PSTATE_TCO); | |
7576 | } | |
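/*
 * PSTATE_TCO is the MTE Tag Check Override bit (PSTATE bit [25]), so
 * tco_read() returns either 0 or 1 << 25 and tco_write() copies only that
 * single bit from the written value, leaving the rest of PSTATE untouched.
 */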
7577 | ||
7578 | static const ARMCPRegInfo mte_reginfo[] = { | |
7579 | { .name = "TFSRE0_EL1", .state = ARM_CP_STATE_AA64, | |
7580 | .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 1, | |
7581 | .access = PL1_RW, .accessfn = access_mte, | |
7582 | .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) }, | |
7583 | { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64, | |
7584 | .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0, | |
7585 | .access = PL1_RW, .accessfn = access_mte, | |
7586 | .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) }, | |
7587 | { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64, | |
7588 | .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0, | |
7589 | .access = PL2_RW, .accessfn = access_mte, | |
7590 | .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) }, | |
7591 | { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64, | |
7592 | .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0, | |
7593 | .access = PL3_RW, | |
7594 | .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[3]) }, | |
7595 | { .name = "RGSR_EL1", .state = ARM_CP_STATE_AA64, | |
7596 | .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 5, | |
7597 | .access = PL1_RW, .accessfn = access_mte, | |
7598 | .fieldoffset = offsetof(CPUARMState, cp15.rgsr_el1) }, | |
7599 | { .name = "GCR_EL1", .state = ARM_CP_STATE_AA64, | |
7600 | .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6, | |
7601 | .access = PL1_RW, .accessfn = access_mte, | |
7602 | .fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) }, | |
7603 | { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64, | |
7604 | .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4, | |
7605 | .access = PL1_R, .accessfn = access_aa64_tid5, | |
7606 | .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS }, | |
7607 | { .name = "TCO", .state = ARM_CP_STATE_AA64, | |
7608 | .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7, | |
7609 | .type = ARM_CP_NO_RAW, | |
7610 | .access = PL0_RW, .readfn = tco_read, .writefn = tco_write }, | |
7611 | { .name = "DC_IGVAC", .state = ARM_CP_STATE_AA64, | |
7612 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 3, | |
7613 | .type = ARM_CP_NOP, .access = PL1_W, | |
7614 | .fgt = FGT_DCIVAC, | |
7615 | .accessfn = aa64_cacheop_poc_access }, | |
7616 | { .name = "DC_IGSW", .state = ARM_CP_STATE_AA64, | |
7617 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 4, | |
7618 | .fgt = FGT_DCISW, | |
7619 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, | |
7620 | { .name = "DC_IGDVAC", .state = ARM_CP_STATE_AA64, | |
7621 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 5, | |
7622 | .type = ARM_CP_NOP, .access = PL1_W, | |
7623 | .fgt = FGT_DCIVAC, | |
7624 | .accessfn = aa64_cacheop_poc_access }, | |
7625 | { .name = "DC_IGDSW", .state = ARM_CP_STATE_AA64, | |
7626 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 6, | |
7627 | .fgt = FGT_DCISW, | |
7628 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, | |
7629 | { .name = "DC_CGSW", .state = ARM_CP_STATE_AA64, | |
7630 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 4, | |
7631 | .fgt = FGT_DCCSW, | |
7632 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, | |
7633 | { .name = "DC_CGDSW", .state = ARM_CP_STATE_AA64, | |
7634 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 6, | |
7635 | .fgt = FGT_DCCSW, | |
7636 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, | |
7637 | { .name = "DC_CIGSW", .state = ARM_CP_STATE_AA64, | |
7638 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 4, | |
7639 | .fgt = FGT_DCCISW, | |
7640 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, | |
7641 | { .name = "DC_CIGDSW", .state = ARM_CP_STATE_AA64, | |
7642 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 6, | |
7643 | .fgt = FGT_DCCISW, | |
7644 | .type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_tsw }, | |
7645 | }; | |
7646 | ||
7647 | static const ARMCPRegInfo mte_tco_ro_reginfo[] = { | |
7648 | { .name = "TCO", .state = ARM_CP_STATE_AA64, | |
7649 | .opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7, | |
7650 | .type = ARM_CP_CONST, .access = PL0_RW, }, | |
7651 | }; | |
7652 | ||
7653 | static const ARMCPRegInfo mte_el0_cacheop_reginfo[] = { | |
7654 | { .name = "DC_CGVAC", .state = ARM_CP_STATE_AA64, | |
7655 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 3, | |
7656 | .type = ARM_CP_NOP, .access = PL0_W, | |
7657 | .fgt = FGT_DCCVAC, | |
7658 | .accessfn = aa64_cacheop_poc_access }, | |
7659 | { .name = "DC_CGDVAC", .state = ARM_CP_STATE_AA64, | |
7660 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 5, | |
7661 | .type = ARM_CP_NOP, .access = PL0_W, | |
7662 | .fgt = FGT_DCCVAC, | |
7663 | .accessfn = aa64_cacheop_poc_access }, | |
7664 | { .name = "DC_CGVAP", .state = ARM_CP_STATE_AA64, | |
7665 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 3, | |
7666 | .type = ARM_CP_NOP, .access = PL0_W, | |
7667 | .fgt = FGT_DCCVAP, | |
7668 | .accessfn = aa64_cacheop_poc_access }, | |
7669 | { .name = "DC_CGDVAP", .state = ARM_CP_STATE_AA64, | |
7670 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 5, | |
7671 | .type = ARM_CP_NOP, .access = PL0_W, | |
7672 | .fgt = FGT_DCCVAP, | |
7673 | .accessfn = aa64_cacheop_poc_access }, | |
7674 | { .name = "DC_CGVADP", .state = ARM_CP_STATE_AA64, | |
7675 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 3, | |
7676 | .type = ARM_CP_NOP, .access = PL0_W, | |
7677 | .fgt = FGT_DCCVADP, | |
7678 | .accessfn = aa64_cacheop_poc_access }, | |
7679 | { .name = "DC_CGDVADP", .state = ARM_CP_STATE_AA64, | |
7680 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 5, | |
7681 | .type = ARM_CP_NOP, .access = PL0_W, | |
7682 | .fgt = FGT_DCCVADP, | |
7683 | .accessfn = aa64_cacheop_poc_access }, | |
7684 | { .name = "DC_CIGVAC", .state = ARM_CP_STATE_AA64, | |
7685 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 3, | |
7686 | .type = ARM_CP_NOP, .access = PL0_W, | |
7687 | .fgt = FGT_DCCIVAC, | |
7688 | .accessfn = aa64_cacheop_poc_access }, | |
7689 | { .name = "DC_CIGDVAC", .state = ARM_CP_STATE_AA64, | |
7690 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 5, | |
7691 | .type = ARM_CP_NOP, .access = PL0_W, | |
7692 | .fgt = FGT_DCCIVAC, | |
7693 | .accessfn = aa64_cacheop_poc_access }, | |
7694 | { .name = "DC_GVA", .state = ARM_CP_STATE_AA64, | |
7695 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 3, | |
7696 | .access = PL0_W, .type = ARM_CP_DC_GVA, | |
7697 | #ifndef CONFIG_USER_ONLY | |
7698 | /* Avoid overhead of an access check that always passes in user-mode */ | |
7699 | .accessfn = aa64_zva_access, | |
7700 | .fgt = FGT_DCZVA, | |
7701 | #endif | |
7702 | }, | |
7703 | { .name = "DC_GZVA", .state = ARM_CP_STATE_AA64, | |
7704 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 4, | |
7705 | .access = PL0_W, .type = ARM_CP_DC_GZVA, | |
7706 | #ifndef CONFIG_USER_ONLY | |
7707 | /* Avoid overhead of an access check that always passes in user-mode */ | |
7708 | .accessfn = aa64_zva_access, | |
7709 | .fgt = FGT_DCZVA, | |
7710 | #endif | |
7711 | }, | |
7712 | }; | |
7713 | ||
7714 | static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri, | |
7715 | bool isread) | |
7716 | { | |
7717 | uint64_t hcr = arm_hcr_el2_eff(env); | |
7718 | int el = arm_current_el(env); | |
7719 | ||
7720 | if (el == 0 && !((hcr & HCR_E2H) && (hcr & HCR_TGE))) { | |
7721 | if (env->cp15.sctlr_el[1] & SCTLR_TSCXT) { | |
7722 | if (hcr & HCR_TGE) { | |
7723 | return CP_ACCESS_TRAP_EL2; | |
7724 | } | |
7725 | return CP_ACCESS_TRAP; | |
7726 | } | |
7727 | } else if (el < 2 && (env->cp15.sctlr_el[2] & SCTLR_TSCXT)) { | |
7728 | return CP_ACCESS_TRAP_EL2; | |
7729 | } | |
7730 | if (el < 2 && arm_is_el2_enabled(env) && !(hcr & HCR_ENSCXT)) { | |
7731 | return CP_ACCESS_TRAP_EL2; | |
7732 | } | |
7733 | if (el < 3 | |
7734 | && arm_feature(env, ARM_FEATURE_EL3) | |
7735 | && !(env->cp15.scr_el3 & SCR_ENSCXT)) { | |
7736 | return CP_ACCESS_TRAP_EL3; | |
7737 | } | |
7738 | return CP_ACCESS_OK; | |
7739 | } | |
7740 | ||
7741 | static const ARMCPRegInfo scxtnum_reginfo[] = { | |
7742 | { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64, | |
7743 | .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7, | |
7744 | .access = PL0_RW, .accessfn = access_scxtnum, | |
7745 | .fgt = FGT_SCXTNUM_EL0, | |
7746 | .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) }, | |
7747 | { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64, | |
7748 | .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7, | |
7749 | .access = PL1_RW, .accessfn = access_scxtnum, | |
7750 | .fgt = FGT_SCXTNUM_EL1, | |
7751 | .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) }, | |
7752 | { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64, | |
7753 | .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7, | |
7754 | .access = PL2_RW, .accessfn = access_scxtnum, | |
7755 | .fieldoffset = offsetof(CPUARMState, scxtnum_el[2]) }, | |
7756 | { .name = "SCXTNUM_EL3", .state = ARM_CP_STATE_AA64, | |
7757 | .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 7, | |
7758 | .access = PL3_RW, | |
7759 | .fieldoffset = offsetof(CPUARMState, scxtnum_el[3]) }, | |
7760 | }; | |
7761 | ||
7762 | static CPAccessResult access_fgt(CPUARMState *env, const ARMCPRegInfo *ri, | |
7763 | bool isread) | |
7764 | { | |
7765 | if (arm_current_el(env) == 2 && | |
7766 | arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_FGTEN)) { | |
7767 | return CP_ACCESS_TRAP_EL3; | |
7768 | } | |
7769 | return CP_ACCESS_OK; | |
7770 | } | |
7771 | ||
7772 | static const ARMCPRegInfo fgt_reginfo[] = { | |
7773 | { .name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64, | |
7774 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, | |
7775 | .access = PL2_RW, .accessfn = access_fgt, | |
7776 | .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HFGRTR]) }, | |
7777 | { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64, | |
7778 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 5, | |
7779 | .access = PL2_RW, .accessfn = access_fgt, | |
7780 | .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HFGWTR]) }, | |
7781 | { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64, | |
7782 | .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 4, | |
7783 | .access = PL2_RW, .accessfn = access_fgt, | |
7784 | .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HDFGRTR]) }, | |
7785 | { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64, | |
7786 | .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 5, | |
7787 | .access = PL2_RW, .accessfn = access_fgt, | |
7788 | .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HDFGWTR]) }, | |
7789 | { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64, | |
7790 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6, | |
7791 | .access = PL2_RW, .accessfn = access_fgt, | |
7792 | .fieldoffset = offsetof(CPUARMState, cp15.fgt_exec[FGTREG_HFGITR]) }, | |
7793 | }; | |
7794 | #endif /* TARGET_AARCH64 */ | |
7795 | ||
7796 | static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, | |
7797 | bool isread) | |
7798 | { | |
7799 | int el = arm_current_el(env); | |
7800 | ||
7801 | if (el == 0) { | |
7802 | uint64_t sctlr = arm_sctlr(env, el); | |
7803 | if (!(sctlr & SCTLR_EnRCTX)) { | |
7804 | return CP_ACCESS_TRAP; | |
7805 | } | |
7806 | } else if (el == 1) { | |
7807 | uint64_t hcr = arm_hcr_el2_eff(env); | |
7808 | if (hcr & HCR_NV) { | |
7809 | return CP_ACCESS_TRAP_EL2; | |
7810 | } | |
7811 | } | |
7812 | return CP_ACCESS_OK; | |
7813 | } | |
7814 | ||
7815 | static const ARMCPRegInfo predinv_reginfo[] = { | |
7816 | { .name = "CFP_RCTX", .state = ARM_CP_STATE_AA64, | |
7817 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 4, | |
7818 | .fgt = FGT_CFPRCTX, | |
7819 | .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, | |
7820 | { .name = "DVP_RCTX", .state = ARM_CP_STATE_AA64, | |
7821 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 5, | |
7822 | .fgt = FGT_DVPRCTX, | |
7823 | .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, | |
7824 | { .name = "CPP_RCTX", .state = ARM_CP_STATE_AA64, | |
7825 | .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 3, .opc2 = 7, | |
7826 | .fgt = FGT_CPPRCTX, | |
7827 | .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, | |
7828 | /* | |
7829 | * Note the AArch32 opcodes have a different OPC1. | |
7830 | */ | |
7831 | { .name = "CFPRCTX", .state = ARM_CP_STATE_AA32, | |
7832 | .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 4, | |
7833 | .fgt = FGT_CFPRCTX, | |
7834 | .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, | |
7835 | { .name = "DVPRCTX", .state = ARM_CP_STATE_AA32, | |
7836 | .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 5, | |
7837 | .fgt = FGT_DVPRCTX, | |
7838 | .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, | |
7839 | { .name = "CPPRCTX", .state = ARM_CP_STATE_AA32, | |
7840 | .cp = 15, .opc1 = 0, .crn = 7, .crm = 3, .opc2 = 7, | |
7841 | .fgt = FGT_CPPRCTX, | |
7842 | .type = ARM_CP_NOP, .access = PL0_W, .accessfn = access_predinv }, | |
7843 | }; | |
7844 | ||
7845 | static uint64_t ccsidr2_read(CPUARMState *env, const ARMCPRegInfo *ri) | |
7846 | { | |
7847 | /* Read the high 32 bits of the current CCSIDR */ | |
7848 | return extract64(ccsidr_read(env, ri), 32, 32); | |
7849 | } | |
7850 | ||
7851 | static const ARMCPRegInfo ccsidr2_reginfo[] = { | |
7852 | { .name = "CCSIDR2", .state = ARM_CP_STATE_BOTH, | |
7853 | .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 2, | |
7854 | .access = PL1_R, | |
7855 | .accessfn = access_tid4, | |
7856 | .readfn = ccsidr2_read, .type = ARM_CP_NO_RAW }, | |
7857 | }; | |
7858 | ||
7859 | static CPAccessResult access_aa64_tid3(CPUARMState *env, const ARMCPRegInfo *ri, | |
7860 | bool isread) | |
7861 | { | |
7862 | if ((arm_current_el(env) < 2) && (arm_hcr_el2_eff(env) & HCR_TID3)) { | |
7863 | return CP_ACCESS_TRAP_EL2; | |
7864 | } | |
7865 | ||
7866 | return CP_ACCESS_OK; | |
7867 | } | |
7868 | ||
7869 | static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri, | |
7870 | bool isread) | |
7871 | { | |
7872 | if (arm_feature(env, ARM_FEATURE_V8)) { | |
7873 | return access_aa64_tid3(env, ri, isread); | |
7874 | } | |
7875 | ||
7876 | return CP_ACCESS_OK; | |
7877 | } | |
7878 | ||
7879 | static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri, | |
7880 | bool isread) | |
7881 | { | |
7882 | if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) { | |
7883 | return CP_ACCESS_TRAP_EL2; | |
7884 | } | |
7885 | ||
7886 | return CP_ACCESS_OK; | |
7887 | } | |
7888 | ||
7889 | static CPAccessResult access_joscr_jmcr(CPUARMState *env, | |
7890 | const ARMCPRegInfo *ri, bool isread) | |
7891 | { | |
7892 | /* | |
7893 | * HSTR.TJDBX traps JOSCR and JMCR accesses, but it exists only | |
7894 | * in v7A, not in v8A. | |
7895 | */ | |
7896 | if (!arm_feature(env, ARM_FEATURE_V8) && | |
7897 | arm_current_el(env) < 2 && !arm_is_secure_below_el3(env) && | |
7898 | (env->cp15.hstr_el2 & HSTR_TJDBX)) { | |
7899 | return CP_ACCESS_TRAP_EL2; | |
7900 | } | |
7901 | return CP_ACCESS_OK; | |
7902 | } | |
7903 | ||
7904 | static const ARMCPRegInfo jazelle_regs[] = { | |
7905 | { .name = "JIDR", | |
7906 | .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0, | |
7907 | .access = PL1_R, .accessfn = access_jazelle, | |
7908 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
7909 | { .name = "JOSCR", | |
7910 | .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0, | |
7911 | .accessfn = access_joscr_jmcr, | |
7912 | .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | |
7913 | { .name = "JMCR", | |
7914 | .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0, | |
7915 | .accessfn = access_joscr_jmcr, | |
7916 | .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, | |
7917 | }; | |
7918 | ||
7919 | static const ARMCPRegInfo contextidr_el2 = { | |
7920 | .name = "CONTEXTIDR_EL2", .state = ARM_CP_STATE_AA64, | |
7921 | .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 1, | |
7922 | .access = PL2_RW, | |
7923 | .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[2]) | |
7924 | }; | |
7925 | ||
7926 | static const ARMCPRegInfo vhe_reginfo[] = { | |
7927 | { .name = "TTBR1_EL2", .state = ARM_CP_STATE_AA64, | |
7928 | .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 1, | |
7929 | .access = PL2_RW, .writefn = vmsa_tcr_ttbr_el2_write, | |
7930 | .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el[2]) }, | |
7931 | #ifndef CONFIG_USER_ONLY | |
7932 | { .name = "CNTHV_CVAL_EL2", .state = ARM_CP_STATE_AA64, | |
7933 | .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 2, | |
7934 | .fieldoffset = | |
7935 | offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].cval), | |
7936 | .type = ARM_CP_IO, .access = PL2_RW, | |
7937 | .writefn = gt_hv_cval_write, .raw_writefn = raw_write }, | |
7938 | { .name = "CNTHV_TVAL_EL2", .state = ARM_CP_STATE_BOTH, | |
7939 | .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 0, | |
7940 | .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW, | |
7941 | .resetfn = gt_hv_timer_reset, | |
7942 | .readfn = gt_hv_tval_read, .writefn = gt_hv_tval_write }, | |
7943 | { .name = "CNTHV_CTL_EL2", .state = ARM_CP_STATE_BOTH, | |
7944 | .type = ARM_CP_IO, | |
7945 | .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 3, .opc2 = 1, | |
7946 | .access = PL2_RW, | |
7947 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYPVIRT].ctl), | |
7948 | .writefn = gt_hv_ctl_write, .raw_writefn = raw_write }, | |
7949 | { .name = "CNTP_CTL_EL02", .state = ARM_CP_STATE_AA64, | |
7950 | .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1, | |
7951 | .type = ARM_CP_IO | ARM_CP_ALIAS, | |
7952 | .access = PL2_RW, .accessfn = e2h_access, | |
7953 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), | |
7954 | .writefn = gt_phys_ctl_write, .raw_writefn = raw_write }, | |
7955 | { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64, | |
7956 | .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1, | |
7957 | .type = ARM_CP_IO | ARM_CP_ALIAS, | |
7958 | .access = PL2_RW, .accessfn = e2h_access, | |
7959 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), | |
7960 | .writefn = gt_virt_ctl_write, .raw_writefn = raw_write }, | |
7961 | { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64, | |
7962 | .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 0, | |
7963 | .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, | |
7964 | .access = PL2_RW, .accessfn = e2h_access, | |
7965 | .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write }, | |
7966 | { .name = "CNTV_TVAL_EL02", .state = ARM_CP_STATE_AA64, | |
7967 | .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 0, | |
7968 | .type = ARM_CP_NO_RAW | ARM_CP_IO | ARM_CP_ALIAS, | |
7969 | .access = PL2_RW, .accessfn = e2h_access, | |
7970 | .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write }, | |
7971 | { .name = "CNTP_CVAL_EL02", .state = ARM_CP_STATE_AA64, | |
7972 | .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2, | |
7973 | .type = ARM_CP_IO | ARM_CP_ALIAS, | |
7974 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), | |
7975 | .access = PL2_RW, .accessfn = e2h_access, | |
7976 | .writefn = gt_phys_cval_write, .raw_writefn = raw_write }, | |
7977 | { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64, | |
7978 | .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2, | |
7979 | .type = ARM_CP_IO | ARM_CP_ALIAS, | |
7980 | .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), | |
7981 | .access = PL2_RW, .accessfn = e2h_access, | |
7982 | .writefn = gt_virt_cval_write, .raw_writefn = raw_write }, | |
7983 | #endif | |
7984 | }; | |
7985 | ||
7986 | #ifndef CONFIG_USER_ONLY | |
7987 | static const ARMCPRegInfo ats1e1_reginfo[] = { | |
7988 | { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64, | |
7989 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, | |
7990 | .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, | |
7991 | .fgt = FGT_ATS1E1RP, | |
7992 | .writefn = ats_write64 }, | |
7993 | { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64, | |
7994 | .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, | |
7995 | .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, | |
7996 | .fgt = FGT_ATS1E1WP, | |
7997 | .writefn = ats_write64 }, | |
7998 | }; | |
7999 | ||
8000 | static const ARMCPRegInfo ats1cp_reginfo[] = { | |
8001 | { .name = "ATS1CPRP", | |
8002 | .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, | |
8003 | .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, | |
8004 | .writefn = ats_write }, | |
8005 | { .name = "ATS1CPWP", | |
8006 | .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, | |
8007 | .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, | |
8008 | .writefn = ats_write }, | |
8009 | }; | |
8010 | #endif | |
8011 | ||
8012 | /* | |
8013 | * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and | |
8014 |  * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field | |
8015 |  * is non-zero: that is never the case for ARMv7, optional for ARMv8, | |
8016 |  * and mandatory for ARMv8.2 and later. | |
8017 | * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's | |
8018 | * implementation is RAZ/WI we can ignore this detail, as we | |
8019 | * do for ACTLR. | |
8020 | */ | |
8021 | static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = { | |
8022 | { .name = "ACTLR2", .state = ARM_CP_STATE_AA32, | |
8023 | .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3, | |
8024 | .access = PL1_RW, .accessfn = access_tacr, | |
8025 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
8026 | { .name = "HACTLR2", .state = ARM_CP_STATE_AA32, | |
8027 | .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, | |
8028 | .access = PL2_RW, .type = ARM_CP_CONST, | |
8029 | .resetvalue = 0 }, | |
8030 | }; | |
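/*
 * Both entries rely on ARM_CP_CONST with a zero resetvalue to get the
 * RAZ/WI behaviour described above: reads return the constant zero and
 * guest writes to a constant register are simply discarded.
 */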
8031 | ||
8032 | void register_cp_regs_for_features(ARMCPU *cpu) | |
8033 | { | |
8034 | /* Register all the coprocessor registers based on feature bits */ | |
8035 | CPUARMState *env = &cpu->env; | |
8036 | if (arm_feature(env, ARM_FEATURE_M)) { | |
8037 | /* M profile has no coprocessor registers */ | |
8038 | return; | |
8039 | } | |
8040 | ||
8041 | define_arm_cp_regs(cpu, cp_reginfo); | |
8042 | if (!arm_feature(env, ARM_FEATURE_V8)) { | |
8043 | /* | |
8044 | * Must go early as it is full of wildcards that may be | |
8045 | * overridden by later definitions. | |
8046 | */ | |
8047 | define_arm_cp_regs(cpu, not_v8_cp_reginfo); | |
8048 | } | |
8049 | ||
8050 | if (arm_feature(env, ARM_FEATURE_V6)) { | |
8051 | /* The ID registers all have impdef reset values */ | |
8052 | ARMCPRegInfo v6_idregs[] = { | |
8053 | { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH, | |
8054 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, | |
8055 | .access = PL1_R, .type = ARM_CP_CONST, | |
8056 | .accessfn = access_aa32_tid3, | |
8057 | .resetvalue = cpu->isar.id_pfr0 }, | |
8058 | /* | |
8059 | * ID_PFR1 is not a plain ARM_CP_CONST because we don't know | |
8060 | * the value of the GIC field until after we define these regs. | |
8061 | */ | |
8062 | { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH, | |
8063 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1, | |
8064 | .access = PL1_R, .type = ARM_CP_NO_RAW, | |
8065 | .accessfn = access_aa32_tid3, | |
8066 | #ifdef CONFIG_USER_ONLY | |
8067 | .type = ARM_CP_CONST, | |
8068 | .resetvalue = cpu->isar.id_pfr1, | |
8069 | #else | |
8070 | .type = ARM_CP_NO_RAW, | |
8071 | .accessfn = access_aa32_tid3, | |
8072 | .readfn = id_pfr1_read, | |
8073 | .writefn = arm_cp_write_ignore | |
8074 | #endif | |
8075 | }, | |
8076 | { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH, | |
8077 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, | |
8078 | .access = PL1_R, .type = ARM_CP_CONST, | |
8079 | .accessfn = access_aa32_tid3, | |
8080 | .resetvalue = cpu->isar.id_dfr0 }, | |
8081 | { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, | |
8082 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, | |
8083 | .access = PL1_R, .type = ARM_CP_CONST, | |
8084 | .accessfn = access_aa32_tid3, | |
8085 | .resetvalue = cpu->id_afr0 }, | |
8086 | { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH, | |
8087 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, | |
8088 | .access = PL1_R, .type = ARM_CP_CONST, | |
8089 | .accessfn = access_aa32_tid3, | |
8090 | .resetvalue = cpu->isar.id_mmfr0 }, | |
8091 | { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, | |
8092 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, | |
8093 | .access = PL1_R, .type = ARM_CP_CONST, | |
8094 | .accessfn = access_aa32_tid3, | |
8095 | .resetvalue = cpu->isar.id_mmfr1 }, | |
8096 | { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, | |
8097 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, | |
8098 | .access = PL1_R, .type = ARM_CP_CONST, | |
8099 | .accessfn = access_aa32_tid3, | |
8100 | .resetvalue = cpu->isar.id_mmfr2 }, | |
8101 | { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, | |
8102 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, | |
8103 | .access = PL1_R, .type = ARM_CP_CONST, | |
8104 | .accessfn = access_aa32_tid3, | |
8105 | .resetvalue = cpu->isar.id_mmfr3 }, | |
8106 | { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, | |
8107 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, | |
8108 | .access = PL1_R, .type = ARM_CP_CONST, | |
8109 | .accessfn = access_aa32_tid3, | |
8110 | .resetvalue = cpu->isar.id_isar0 }, | |
8111 | { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, | |
8112 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, | |
8113 | .access = PL1_R, .type = ARM_CP_CONST, | |
8114 | .accessfn = access_aa32_tid3, | |
8115 | .resetvalue = cpu->isar.id_isar1 }, | |
8116 | { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, | |
8117 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, | |
8118 | .access = PL1_R, .type = ARM_CP_CONST, | |
8119 | .accessfn = access_aa32_tid3, | |
8120 | .resetvalue = cpu->isar.id_isar2 }, | |
8121 | { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, | |
8122 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, | |
8123 | .access = PL1_R, .type = ARM_CP_CONST, | |
8124 | .accessfn = access_aa32_tid3, | |
8125 | .resetvalue = cpu->isar.id_isar3 }, | |
8126 | { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, | |
8127 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, | |
8128 | .access = PL1_R, .type = ARM_CP_CONST, | |
8129 | .accessfn = access_aa32_tid3, | |
8130 | .resetvalue = cpu->isar.id_isar4 }, | |
8131 | { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, | |
8132 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, | |
8133 | .access = PL1_R, .type = ARM_CP_CONST, | |
8134 | .accessfn = access_aa32_tid3, | |
8135 | .resetvalue = cpu->isar.id_isar5 }, | |
8136 | { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, | |
8137 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, | |
8138 | .access = PL1_R, .type = ARM_CP_CONST, | |
8139 | .accessfn = access_aa32_tid3, | |
8140 | .resetvalue = cpu->isar.id_mmfr4 }, | |
8141 | { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, | |
8142 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, | |
8143 | .access = PL1_R, .type = ARM_CP_CONST, | |
8144 | .accessfn = access_aa32_tid3, | |
8145 | .resetvalue = cpu->isar.id_isar6 }, | |
8146 | }; | |
8147 | define_arm_cp_regs(cpu, v6_idregs); | |
8148 | define_arm_cp_regs(cpu, v6_cp_reginfo); | |
8149 | } else { | |
8150 | define_arm_cp_regs(cpu, not_v6_cp_reginfo); | |
8151 | } | |
8152 | if (arm_feature(env, ARM_FEATURE_V6K)) { | |
8153 | define_arm_cp_regs(cpu, v6k_cp_reginfo); | |
8154 | } | |
8155 | if (arm_feature(env, ARM_FEATURE_V7MP) && | |
8156 | !arm_feature(env, ARM_FEATURE_PMSA)) { | |
8157 | define_arm_cp_regs(cpu, v7mp_cp_reginfo); | |
8158 | } | |
8159 | if (arm_feature(env, ARM_FEATURE_V7VE)) { | |
8160 | define_arm_cp_regs(cpu, pmovsset_cp_reginfo); | |
8161 | } | |
8162 | if (arm_feature(env, ARM_FEATURE_V7)) { | |
8163 | ARMCPRegInfo clidr = { | |
8164 | .name = "CLIDR", .state = ARM_CP_STATE_BOTH, | |
8165 | .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, | |
8166 | .access = PL1_R, .type = ARM_CP_CONST, | |
8167 | .accessfn = access_tid4, | |
8168 | .fgt = FGT_CLIDR_EL1, | |
8169 | .resetvalue = cpu->clidr | |
8170 | }; | |
8171 | define_one_arm_cp_reg(cpu, &clidr); | |
8172 | define_arm_cp_regs(cpu, v7_cp_reginfo); | |
8173 | define_debug_regs(cpu); | |
8174 | define_pmu_regs(cpu); | |
8175 | } else { | |
8176 | define_arm_cp_regs(cpu, not_v7_cp_reginfo); | |
8177 | } | |
8178 | if (arm_feature(env, ARM_FEATURE_V8)) { | |
8179 | /* | |
8180 | * v8 ID registers, which all have impdef reset values. | |
8181 | * Note that within the ID register ranges the unused slots | |
8182 | * must all be RAZ, not UNDEF; future architecture versions may | |
8183 | * define new registers here. | |
8184 | * ID registers which are AArch64 views of the AArch32 ID registers | |
8185 | * which already existed in v6 and v7 are handled elsewhere, | |
8186 | * in v6_idregs[]. | |
8187 | */ | |
8188 | int i; | |
8189 | ARMCPRegInfo v8_idregs[] = { | |
8190 | /* | |
8191 | * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system | |
8192 | * emulation because we don't know the right value for the | |
8193 | * GIC field until after we define these regs. | |
8194 | */ | |
8195 | { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64, | |
8196 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0, | |
8197 | .access = PL1_R, | |
8198 | #ifdef CONFIG_USER_ONLY | |
8199 | .type = ARM_CP_CONST, | |
8200 | .resetvalue = cpu->isar.id_aa64pfr0 | |
8201 | #else | |
8202 | .type = ARM_CP_NO_RAW, | |
8203 | .accessfn = access_aa64_tid3, | |
8204 | .readfn = id_aa64pfr0_read, | |
8205 | .writefn = arm_cp_write_ignore | |
8206 | #endif | |
8207 | }, | |
8208 | { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64, | |
8209 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, | |
8210 | .access = PL1_R, .type = ARM_CP_CONST, | |
8211 | .accessfn = access_aa64_tid3, | |
8212 | .resetvalue = cpu->isar.id_aa64pfr1}, | |
8213 | { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8214 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, | |
8215 | .access = PL1_R, .type = ARM_CP_CONST, | |
8216 | .accessfn = access_aa64_tid3, | |
8217 | .resetvalue = 0 }, | |
8218 | { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8219 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, | |
8220 | .access = PL1_R, .type = ARM_CP_CONST, | |
8221 | .accessfn = access_aa64_tid3, | |
8222 | .resetvalue = 0 }, | |
8223 | { .name = "ID_AA64ZFR0_EL1", .state = ARM_CP_STATE_AA64, | |
8224 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, | |
8225 | .access = PL1_R, .type = ARM_CP_CONST, | |
8226 | .accessfn = access_aa64_tid3, | |
8227 | .resetvalue = cpu->isar.id_aa64zfr0 }, | |
8228 | { .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64, | |
8229 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, | |
8230 | .access = PL1_R, .type = ARM_CP_CONST, | |
8231 | .accessfn = access_aa64_tid3, | |
8232 | .resetvalue = cpu->isar.id_aa64smfr0 }, | |
8233 | { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8234 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6, | |
8235 | .access = PL1_R, .type = ARM_CP_CONST, | |
8236 | .accessfn = access_aa64_tid3, | |
8237 | .resetvalue = 0 }, | |
8238 | { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8239 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7, | |
8240 | .access = PL1_R, .type = ARM_CP_CONST, | |
8241 | .accessfn = access_aa64_tid3, | |
8242 | .resetvalue = 0 }, | |
8243 | { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64, | |
8244 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, | |
8245 | .access = PL1_R, .type = ARM_CP_CONST, | |
8246 | .accessfn = access_aa64_tid3, | |
8247 | .resetvalue = cpu->isar.id_aa64dfr0 }, | |
8248 | { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, | |
8249 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, | |
8250 | .access = PL1_R, .type = ARM_CP_CONST, | |
8251 | .accessfn = access_aa64_tid3, | |
8252 | .resetvalue = cpu->isar.id_aa64dfr1 }, | |
8253 | { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8254 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, | |
8255 | .access = PL1_R, .type = ARM_CP_CONST, | |
8256 | .accessfn = access_aa64_tid3, | |
8257 | .resetvalue = 0 }, | |
8258 | { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8259 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3, | |
8260 | .access = PL1_R, .type = ARM_CP_CONST, | |
8261 | .accessfn = access_aa64_tid3, | |
8262 | .resetvalue = 0 }, | |
8263 | { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64, | |
8264 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4, | |
8265 | .access = PL1_R, .type = ARM_CP_CONST, | |
8266 | .accessfn = access_aa64_tid3, | |
8267 | .resetvalue = cpu->id_aa64afr0 }, | |
8268 | { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64, | |
8269 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5, | |
8270 | .access = PL1_R, .type = ARM_CP_CONST, | |
8271 | .accessfn = access_aa64_tid3, | |
8272 | .resetvalue = cpu->id_aa64afr1 }, | |
8273 | { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8274 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6, | |
8275 | .access = PL1_R, .type = ARM_CP_CONST, | |
8276 | .accessfn = access_aa64_tid3, | |
8277 | .resetvalue = 0 }, | |
8278 | { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8279 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7, | |
8280 | .access = PL1_R, .type = ARM_CP_CONST, | |
8281 | .accessfn = access_aa64_tid3, | |
8282 | .resetvalue = 0 }, | |
8283 | { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64, | |
8284 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, | |
8285 | .access = PL1_R, .type = ARM_CP_CONST, | |
8286 | .accessfn = access_aa64_tid3, | |
8287 | .resetvalue = cpu->isar.id_aa64isar0 }, | |
8288 | { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, | |
8289 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, | |
8290 | .access = PL1_R, .type = ARM_CP_CONST, | |
8291 | .accessfn = access_aa64_tid3, | |
8292 | .resetvalue = cpu->isar.id_aa64isar1 }, | |
8293 | { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8294 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, | |
8295 | .access = PL1_R, .type = ARM_CP_CONST, | |
8296 | .accessfn = access_aa64_tid3, | |
8297 | .resetvalue = 0 }, | |
8298 | { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8299 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3, | |
8300 | .access = PL1_R, .type = ARM_CP_CONST, | |
8301 | .accessfn = access_aa64_tid3, | |
8302 | .resetvalue = 0 }, | |
8303 | { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8304 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4, | |
8305 | .access = PL1_R, .type = ARM_CP_CONST, | |
8306 | .accessfn = access_aa64_tid3, | |
8307 | .resetvalue = 0 }, | |
8308 | { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8309 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5, | |
8310 | .access = PL1_R, .type = ARM_CP_CONST, | |
8311 | .accessfn = access_aa64_tid3, | |
8312 | .resetvalue = 0 }, | |
8313 | { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8314 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6, | |
8315 | .access = PL1_R, .type = ARM_CP_CONST, | |
8316 | .accessfn = access_aa64_tid3, | |
8317 | .resetvalue = 0 }, | |
8318 | { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8319 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7, | |
8320 | .access = PL1_R, .type = ARM_CP_CONST, | |
8321 | .accessfn = access_aa64_tid3, | |
8322 | .resetvalue = 0 }, | |
8323 | { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64, | |
8324 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, | |
8325 | .access = PL1_R, .type = ARM_CP_CONST, | |
8326 | .accessfn = access_aa64_tid3, | |
8327 | .resetvalue = cpu->isar.id_aa64mmfr0 }, | |
8328 | { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, | |
8329 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, | |
8330 | .access = PL1_R, .type = ARM_CP_CONST, | |
8331 | .accessfn = access_aa64_tid3, | |
8332 | .resetvalue = cpu->isar.id_aa64mmfr1 }, | |
8333 | { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, | |
8334 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, | |
8335 | .access = PL1_R, .type = ARM_CP_CONST, | |
8336 | .accessfn = access_aa64_tid3, | |
8337 | .resetvalue = cpu->isar.id_aa64mmfr2 }, | |
8338 | { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8339 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, | |
8340 | .access = PL1_R, .type = ARM_CP_CONST, | |
8341 | .accessfn = access_aa64_tid3, | |
8342 | .resetvalue = 0 }, | |
8343 | { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8344 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4, | |
8345 | .access = PL1_R, .type = ARM_CP_CONST, | |
8346 | .accessfn = access_aa64_tid3, | |
8347 | .resetvalue = 0 }, | |
8348 | { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8349 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5, | |
8350 | .access = PL1_R, .type = ARM_CP_CONST, | |
8351 | .accessfn = access_aa64_tid3, | |
8352 | .resetvalue = 0 }, | |
8353 | { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8354 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6, | |
8355 | .access = PL1_R, .type = ARM_CP_CONST, | |
8356 | .accessfn = access_aa64_tid3, | |
8357 | .resetvalue = 0 }, | |
8358 | { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64, | |
8359 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7, | |
8360 | .access = PL1_R, .type = ARM_CP_CONST, | |
8361 | .accessfn = access_aa64_tid3, | |
8362 | .resetvalue = 0 }, | |
8363 | { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64, | |
8364 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, | |
8365 | .access = PL1_R, .type = ARM_CP_CONST, | |
8366 | .accessfn = access_aa64_tid3, | |
8367 | .resetvalue = cpu->isar.mvfr0 }, | |
8368 | { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, | |
8369 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, | |
8370 | .access = PL1_R, .type = ARM_CP_CONST, | |
8371 | .accessfn = access_aa64_tid3, | |
8372 | .resetvalue = cpu->isar.mvfr1 }, | |
8373 | { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, | |
8374 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, | |
8375 | .access = PL1_R, .type = ARM_CP_CONST, | |
8376 | .accessfn = access_aa64_tid3, | |
8377 | .resetvalue = cpu->isar.mvfr2 }, | |
8378 | /* | |
8379 | * "0, c0, c3, {0,1,2}" are the encodings corresponding to | |
8380 | * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding | |
8381 | * as RAZ, since it is in the "reserved for future ID | |
8382 | * registers, RAZ" part of the AArch32 encoding space. | |
8383 | */ | |
8384 | { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32, | |
8385 | .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, | |
8386 | .access = PL1_R, .type = ARM_CP_CONST, | |
8387 | .accessfn = access_aa64_tid3, | |
8388 | .resetvalue = 0 }, | |
8389 | { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32, | |
8390 | .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, | |
8391 | .access = PL1_R, .type = ARM_CP_CONST, | |
8392 | .accessfn = access_aa64_tid3, | |
8393 | .resetvalue = 0 }, | |
8394 | { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32, | |
8395 | .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, | |
8396 | .access = PL1_R, .type = ARM_CP_CONST, | |
8397 | .accessfn = access_aa64_tid3, | |
8398 | .resetvalue = 0 }, | |
8399 | /* | |
8400 | * Other encodings in "0, c0, c3, ..." are STATE_BOTH because | |
8401 | * they're also RAZ for AArch64, and in v8 are gradually | |
8402 | * being filled with the AArch64 views of newly added | |
8403 | * AArch32 ID registers. | |
8404 | */ | |
8405 | { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH, | |
8406 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, | |
8407 | .access = PL1_R, .type = ARM_CP_CONST, | |
8408 | .accessfn = access_aa64_tid3, | |
8409 | .resetvalue = 0 }, | |
8410 | { .name = "ID_PFR2", .state = ARM_CP_STATE_BOTH, | |
8411 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, | |
8412 | .access = PL1_R, .type = ARM_CP_CONST, | |
8413 | .accessfn = access_aa64_tid3, | |
8414 | .resetvalue = cpu->isar.id_pfr2 }, | |
8415 | { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH, | |
8416 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, | |
8417 | .access = PL1_R, .type = ARM_CP_CONST, | |
8418 | .accessfn = access_aa64_tid3, | |
8419 | .resetvalue = cpu->isar.id_dfr1 }, | |
8420 | { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH, | |
8421 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6, | |
8422 | .access = PL1_R, .type = ARM_CP_CONST, | |
8423 | .accessfn = access_aa64_tid3, | |
8424 | .resetvalue = cpu->isar.id_mmfr5 }, | |
8425 | { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH, | |
8426 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7, | |
8427 | .access = PL1_R, .type = ARM_CP_CONST, | |
8428 | .accessfn = access_aa64_tid3, | |
8429 | .resetvalue = 0 }, | |
8430 | { .name = "PMCEID0", .state = ARM_CP_STATE_AA32, | |
8431 | .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6, | |
8432 | .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, | |
8433 | .fgt = FGT_PMCEIDN_EL0, | |
8434 | .resetvalue = extract64(cpu->pmceid0, 0, 32) }, | |
8435 | { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64, | |
8436 | .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6, | |
8437 | .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, | |
8438 | .fgt = FGT_PMCEIDN_EL0, | |
8439 | .resetvalue = cpu->pmceid0 }, | |
8440 | { .name = "PMCEID1", .state = ARM_CP_STATE_AA32, | |
8441 | .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7, | |
8442 | .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, | |
8443 | .fgt = FGT_PMCEIDN_EL0, | |
8444 | .resetvalue = extract64(cpu->pmceid1, 0, 32) }, | |
8445 | { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64, | |
8446 | .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7, | |
8447 | .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, | |
8448 | .fgt = FGT_PMCEIDN_EL0, | |
8449 | .resetvalue = cpu->pmceid1 }, | |
8450 | }; | |
8451 | #ifdef CONFIG_USER_ONLY | |
8452 | static const ARMCPRegUserSpaceInfo v8_user_idregs[] = { | |
8453 | { .name = "ID_AA64PFR0_EL1", | |
8454 | .exported_bits = R_ID_AA64PFR0_FP_MASK | | |
8455 | R_ID_AA64PFR0_ADVSIMD_MASK | | |
8456 | R_ID_AA64PFR0_SVE_MASK | | |
8457 | R_ID_AA64PFR0_DIT_MASK, | |
8458 | .fixed_bits = (0x1u << R_ID_AA64PFR0_EL0_SHIFT) | | |
8459 | (0x1u << R_ID_AA64PFR0_EL1_SHIFT) }, | |
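| /* | |
| * The fixed EL0/EL1 field value of 0x1 reports EL0 and EL1 as | |
| * implemented in AArch64 state only, which is what a user-mode | |
| * emulation guest should see. | |
| */ | |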
8460 | { .name = "ID_AA64PFR1_EL1", | |
8461 | .exported_bits = R_ID_AA64PFR1_BT_MASK | | |
8462 | R_ID_AA64PFR1_SSBS_MASK | | |
8463 | R_ID_AA64PFR1_MTE_MASK | | |
8464 | R_ID_AA64PFR1_SME_MASK }, | |
8465 | { .name = "ID_AA64PFR*_EL1_RESERVED", | |
8466 | .is_glob = true }, | |
8467 | { .name = "ID_AA64ZFR0_EL1", | |
8468 | .exported_bits = R_ID_AA64ZFR0_SVEVER_MASK | | |
8469 | R_ID_AA64ZFR0_AES_MASK | | |
8470 | R_ID_AA64ZFR0_BITPERM_MASK | | |
8471 | R_ID_AA64ZFR0_BFLOAT16_MASK | | |
8472 | R_ID_AA64ZFR0_SHA3_MASK | | |
8473 | R_ID_AA64ZFR0_SM4_MASK | | |
8474 | R_ID_AA64ZFR0_I8MM_MASK | | |
8475 | R_ID_AA64ZFR0_F32MM_MASK | | |
8476 | R_ID_AA64ZFR0_F64MM_MASK }, | |
8477 | { .name = "ID_AA64SMFR0_EL1", | |
8478 | .exported_bits = R_ID_AA64SMFR0_F32F32_MASK | | |
8479 | R_ID_AA64SMFR0_B16F32_MASK | | |
8480 | R_ID_AA64SMFR0_F16F32_MASK | | |
8481 | R_ID_AA64SMFR0_I8I32_MASK | | |
8482 | R_ID_AA64SMFR0_F64F64_MASK | | |
8483 | R_ID_AA64SMFR0_I16I64_MASK | | |
8484 | R_ID_AA64SMFR0_FA64_MASK }, | |
8485 | { .name = "ID_AA64MMFR0_EL1", | |
8486 | .exported_bits = R_ID_AA64MMFR0_ECV_MASK, | |
8487 | .fixed_bits = (0xfu << R_ID_AA64MMFR0_TGRAN64_SHIFT) | | |
8488 | (0xfu << R_ID_AA64MMFR0_TGRAN4_SHIFT) }, | |
8489 | { .name = "ID_AA64MMFR1_EL1", | |
8490 | .exported_bits = R_ID_AA64MMFR1_AFP_MASK }, | |
8491 | { .name = "ID_AA64MMFR2_EL1", | |
8492 | .exported_bits = R_ID_AA64MMFR2_AT_MASK }, | |
8493 | { .name = "ID_AA64MMFR*_EL1_RESERVED", | |
8494 | .is_glob = true }, | |
8495 | { .name = "ID_AA64DFR0_EL1", | |
8496 | .fixed_bits = (0x6u << R_ID_AA64DFR0_DEBUGVER_SHIFT) }, | |
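| /* DebugVer 0x6 corresponds to the baseline Armv8 debug architecture. */ | |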
8497 | { .name = "ID_AA64DFR1_EL1" }, | |
8498 | { .name = "ID_AA64DFR*_EL1_RESERVED", | |
8499 | .is_glob = true }, | |
8500 | { .name = "ID_AA64AFR*", | |
8501 | .is_glob = true }, | |
8502 | { .name = "ID_AA64ISAR0_EL1", | |
8503 | .exported_bits = R_ID_AA64ISAR0_AES_MASK | | |
8504 | R_ID_AA64ISAR0_SHA1_MASK | | |
8505 | R_ID_AA64ISAR0_SHA2_MASK | | |
8506 | R_ID_AA64ISAR0_CRC32_MASK | | |
8507 | R_ID_AA64ISAR0_ATOMIC_MASK | | |
8508 | R_ID_AA64ISAR0_RDM_MASK | | |
8509 | R_ID_AA64ISAR0_SHA3_MASK | | |
8510 | R_ID_AA64ISAR0_SM3_MASK | | |
8511 | R_ID_AA64ISAR0_SM4_MASK | | |
8512 | R_ID_AA64ISAR0_DP_MASK | | |
8513 | R_ID_AA64ISAR0_FHM_MASK | | |
8514 | R_ID_AA64ISAR0_TS_MASK | | |
8515 | R_ID_AA64ISAR0_RNDR_MASK }, | |
8516 | { .name = "ID_AA64ISAR1_EL1", | |
8517 | .exported_bits = R_ID_AA64ISAR1_DPB_MASK | | |
8518 | R_ID_AA64ISAR1_APA_MASK | | |
8519 | R_ID_AA64ISAR1_API_MASK | | |
8520 | R_ID_AA64ISAR1_JSCVT_MASK | | |
8521 | R_ID_AA64ISAR1_FCMA_MASK | | |
8522 | R_ID_AA64ISAR1_LRCPC_MASK | | |
8523 | R_ID_AA64ISAR1_GPA_MASK | | |
8524 | R_ID_AA64ISAR1_GPI_MASK | | |
8525 | R_ID_AA64ISAR1_FRINTTS_MASK | | |
8526 | R_ID_AA64ISAR1_SB_MASK | | |
8527 | R_ID_AA64ISAR1_BF16_MASK | | |
8528 | R_ID_AA64ISAR1_DGH_MASK | | |
8529 | R_ID_AA64ISAR1_I8MM_MASK }, | |
8530 | { .name = "ID_AA64ISAR2_EL1", | |
8531 | .exported_bits = R_ID_AA64ISAR2_WFXT_MASK | | |
8532 | R_ID_AA64ISAR2_RPRES_MASK | | |
8533 | R_ID_AA64ISAR2_GPA3_MASK | | |
8534 | R_ID_AA64ISAR2_APA3_MASK }, | |
8535 | { .name = "ID_AA64ISAR*_EL1_RESERVED", | |
8536 | .is_glob = true }, | |
8537 | }; | |
8538 | modify_arm_cp_regs(v8_idregs, v8_user_idregs); | |
8539 | #endif | |
8540 | /* RVBAR_EL1 is only implemented if EL1 is the highest EL */ | |
8541 | if (!arm_feature(env, ARM_FEATURE_EL3) && | |
8542 | !arm_feature(env, ARM_FEATURE_EL2)) { | |
8543 | ARMCPRegInfo rvbar = { | |
8544 | .name = "RVBAR_EL1", .state = ARM_CP_STATE_BOTH, | |
8545 | .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, | |
8546 | .access = PL1_R, | |
8547 | .fieldoffset = offsetof(CPUARMState, cp15.rvbar), | |
8548 | }; | |
8549 | define_one_arm_cp_reg(cpu, &rvbar); | |
8550 | } | |
8551 | define_arm_cp_regs(cpu, v8_idregs); | |
8552 | define_arm_cp_regs(cpu, v8_cp_reginfo); | |
8553 | ||
8554 | for (i = 4; i < 16; i++) { | |
8555 | /* | |
8556 | * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32. | |
8557 | * For pre-v8 cores there are RAZ patterns for these in | |
8558 | * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here. | |
8559 | * v8 extends the "must RAZ" part of the ID register space | |
8560 | * to also cover c0, 0, c{8-15}, {0-7}. | |
8561 | * These are STATE_AA32 because in the AArch64 sysreg space | |
8562 | * c4-c7 is where the AArch64 ID registers live (and we've | |
8563 | * already defined those in v8_idregs[]), and c8-c15 are not | |
8564 | * "must RAZ" for AArch64. | |
8565 | */ | |
8566 | g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i); | |
8567 | ARMCPRegInfo v8_aa32_raz_idregs = { | |
8568 | .name = name, | |
8569 | .state = ARM_CP_STATE_AA32, | |
8570 | .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY, | |
8571 | .access = PL1_R, .type = ARM_CP_CONST, | |
8572 | .accessfn = access_aa64_tid3, | |
8573 | .resetvalue = 0 }; | |
8574 | define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs); | |
8575 | } | |
8576 | } | |
8577 | ||
8578 | /* | |
8579 | * Register the base EL2 cpregs. | |
8580 | * Pre-v8, these registers are implemented only as part of the | |
8581 | * Virtualization Extensions (EL2 present). Beginning with v8, | |
8582 | * if EL2 is missing but EL3 is enabled, most of these become | |
8583 | * RES0 from EL3, with some specific exceptions. | |
8584 | */ | |
8585 | if (arm_feature(env, ARM_FEATURE_EL2) | |
8586 | || (arm_feature(env, ARM_FEATURE_EL3) | |
8587 | && arm_feature(env, ARM_FEATURE_V8))) { | |
8588 | uint64_t vmpidr_def = mpidr_read_val(env); | |
8589 | ARMCPRegInfo vpidr_regs[] = { | |
8590 | { .name = "VPIDR", .state = ARM_CP_STATE_AA32, | |
8591 | .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, | |
8592 | .access = PL2_RW, .accessfn = access_el3_aa32ns, | |
8593 | .resetvalue = cpu->midr, | |
8594 | .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ, | |
8595 | .fieldoffset = offsetoflow32(CPUARMState, cp15.vpidr_el2) }, | |
8596 | { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64, | |
8597 | .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, | |
8598 | .access = PL2_RW, .resetvalue = cpu->midr, | |
8599 | .type = ARM_CP_EL3_NO_EL2_C_NZ, | |
8600 | .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, | |
8601 | { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, | |
8602 | .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, | |
8603 | .access = PL2_RW, .accessfn = access_el3_aa32ns, | |
8604 | .resetvalue = vmpidr_def, | |
8605 | .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_C_NZ, | |
8606 | .fieldoffset = offsetoflow32(CPUARMState, cp15.vmpidr_el2) }, | |
8607 | { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64, | |
8608 | .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, | |
8609 | .access = PL2_RW, .resetvalue = vmpidr_def, | |
8610 | .type = ARM_CP_EL3_NO_EL2_C_NZ, | |
8611 | .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, | |
8612 | }; | |
8613 | /* | |
8614 | * The only field of MDCR_EL2 that has a defined architectural reset | |
8615 | * value is MDCR_EL2.HPMN, which should reset to the value of PMCR_EL0.N. | |
8616 | */ | |
8617 | ARMCPRegInfo mdcr_el2 = { | |
8618 | .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_IO, | |
8619 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1, | |
8620 | .writefn = mdcr_el2_write, | |
8621 | .access = PL2_RW, .resetvalue = pmu_num_counters(env), | |
8622 | .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), | |
8623 | }; | |
8624 | define_one_arm_cp_reg(cpu, &mdcr_el2); | |
8625 | define_arm_cp_regs(cpu, vpidr_regs); | |
8626 | define_arm_cp_regs(cpu, el2_cp_reginfo); | |
8627 | if (arm_feature(env, ARM_FEATURE_V8)) { | |
8628 | define_arm_cp_regs(cpu, el2_v8_cp_reginfo); | |
8629 | } | |
8630 | if (cpu_isar_feature(aa64_sel2, cpu)) { | |
8631 | define_arm_cp_regs(cpu, el2_sec_cp_reginfo); | |
8632 | } | |
8633 | /* RVBAR_EL2 is only implemented if EL2 is the highest EL */ | |
8634 | if (!arm_feature(env, ARM_FEATURE_EL3)) { | |
8635 | ARMCPRegInfo rvbar[] = { | |
8636 | { | |
8637 | .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64, | |
8638 | .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1, | |
8639 | .access = PL2_R, | |
8640 | .fieldoffset = offsetof(CPUARMState, cp15.rvbar), | |
8641 | }, | |
8642 | { .name = "RVBAR", .type = ARM_CP_ALIAS, | |
8643 | .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1, | |
8644 | .access = PL2_R, | |
8645 | .fieldoffset = offsetof(CPUARMState, cp15.rvbar), | |
8646 | }, | |
8647 | }; | |
8648 | define_arm_cp_regs(cpu, rvbar); | |
8649 | } | |
8650 | } | |
8651 | ||
8652 | /* Register the base EL3 cpregs. */ | |
8653 | if (arm_feature(env, ARM_FEATURE_EL3)) { | |
8654 | define_arm_cp_regs(cpu, el3_cp_reginfo); | |
8655 | ARMCPRegInfo el3_regs[] = { | |
8656 | { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64, | |
8657 | .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1, | |
8658 | .access = PL3_R, | |
8659 | .fieldoffset = offsetof(CPUARMState, cp15.rvbar), | |
8660 | }, | |
8661 | { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64, | |
8662 | .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0, | |
8663 | .access = PL3_RW, | |
8664 | .raw_writefn = raw_write, .writefn = sctlr_write, | |
8665 | .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]), | |
8666 | .resetvalue = cpu->reset_sctlr }, | |
8667 | }; | |
8668 | ||
8669 | define_arm_cp_regs(cpu, el3_regs); | |
8670 | } | |
8671 | /* | |
8672 | * The behaviour of NSACR is sufficiently varied that we don't | |
8673 | * try to describe it in a single reginfo: | |
8674 | * if EL3 is 64 bit, then trap to EL3 from S EL1, | |
8675 | * reads as constant 0xc00 from NS EL1 and NS EL2 | |
8676 | * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2 | |
8677 | * if v7 without EL3, register doesn't exist | |
8678 | * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2 | |
8679 | */ | |
8680 | if (arm_feature(env, ARM_FEATURE_EL3)) { | |
8681 | if (arm_feature(env, ARM_FEATURE_AARCH64)) { | |
8682 | static const ARMCPRegInfo nsacr = { | |
8683 | .name = "NSACR", .type = ARM_CP_CONST, | |
8684 | .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, | |
8685 | .access = PL1_RW, .accessfn = nsacr_access, | |
8686 | .resetvalue = 0xc00 | |
8687 | }; | |
8688 | define_one_arm_cp_reg(cpu, &nsacr); | |
8689 | } else { | |
8690 | static const ARMCPRegInfo nsacr = { | |
8691 | .name = "NSACR", | |
8692 | .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, | |
8693 | .access = PL3_RW | PL1_R, | |
8694 | .resetvalue = 0, | |
8695 | .fieldoffset = offsetof(CPUARMState, cp15.nsacr) | |
8696 | }; | |
8697 | define_one_arm_cp_reg(cpu, &nsacr); | |
8698 | } | |
8699 | } else { | |
8700 | if (arm_feature(env, ARM_FEATURE_V8)) { | |
8701 | static const ARMCPRegInfo nsacr = { | |
8702 | .name = "NSACR", .type = ARM_CP_CONST, | |
8703 | .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2, | |
8704 | .access = PL1_R, | |
8705 | .resetvalue = 0xc00 | |
8706 | }; | |
8707 | define_one_arm_cp_reg(cpu, &nsacr); | |
8708 | } | |
8709 | } | |
8710 | ||
8711 | if (arm_feature(env, ARM_FEATURE_PMSA)) { | |
8712 | if (arm_feature(env, ARM_FEATURE_V6)) { | |
8713 | /* PMSAv6 not implemented */ | |
8714 | assert(arm_feature(env, ARM_FEATURE_V7)); | |
8715 | define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); | |
8716 | define_arm_cp_regs(cpu, pmsav7_cp_reginfo); | |
8717 | } else { | |
8718 | define_arm_cp_regs(cpu, pmsav5_cp_reginfo); | |
8719 | } | |
8720 | } else { | |
8721 | define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); | |
8722 | define_arm_cp_regs(cpu, vmsa_cp_reginfo); | |
8723 | /* TTBCR2 is introduced with ARMv8.2-AA32HPD. */ | |
8724 | if (cpu_isar_feature(aa32_hpd, cpu)) { | |
8725 | define_one_arm_cp_reg(cpu, &ttbcr2_reginfo); | |
8726 | } | |
8727 | } | |
8728 | if (arm_feature(env, ARM_FEATURE_THUMB2EE)) { | |
8729 | define_arm_cp_regs(cpu, t2ee_cp_reginfo); | |
8730 | } | |
8731 | if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { | |
8732 | define_arm_cp_regs(cpu, generic_timer_cp_reginfo); | |
8733 | } | |
8734 | if (arm_feature(env, ARM_FEATURE_VAPA)) { | |
8735 | define_arm_cp_regs(cpu, vapa_cp_reginfo); | |
8736 | } | |
8737 | if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) { | |
8738 | define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo); | |
8739 | } | |
8740 | if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) { | |
8741 | define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo); | |
8742 | } | |
8743 | if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) { | |
8744 | define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo); | |
8745 | } | |
8746 | if (arm_feature(env, ARM_FEATURE_OMAPCP)) { | |
8747 | define_arm_cp_regs(cpu, omap_cp_reginfo); | |
8748 | } | |
8749 | if (arm_feature(env, ARM_FEATURE_STRONGARM)) { | |
8750 | define_arm_cp_regs(cpu, strongarm_cp_reginfo); | |
8751 | } | |
8752 | if (arm_feature(env, ARM_FEATURE_XSCALE)) { | |
8753 | define_arm_cp_regs(cpu, xscale_cp_reginfo); | |
8754 | } | |
8755 | if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { | |
8756 | define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); | |
8757 | } | |
8758 | if (arm_feature(env, ARM_FEATURE_LPAE)) { | |
8759 | define_arm_cp_regs(cpu, lpae_cp_reginfo); | |
8760 | } | |
8761 | if (cpu_isar_feature(aa32_jazelle, cpu)) { | |
8762 | define_arm_cp_regs(cpu, jazelle_regs); | |
8763 | } | |
8764 | /* | |
8765 | * Slightly awkwardly, the OMAP and StrongARM cores need all of | |
8766 | * cp15 crn=0 to be writes-ignored, whereas for other cores they should | |
8767 | * be read-only (i.e. a write causes an UNDEF exception). | |
8768 | */ | |
8769 | { | |
8770 | ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = { | |
8771 | /* | |
8772 | * Pre-v8 MIDR space. | |
8773 | * Note that the MIDR isn't a simple constant register because | |
8774 | * of the TI925 behaviour where writes to another register can | |
8775 | * cause the MIDR value to change. | |
8776 | * | |
8777 | * Unimplemented registers in the c15 0 0 0 space default to | |
8778 | * MIDR. Define MIDR first to cover this entire space, then let | |
8779 | * CTR, TCMTR and friends override it. | |
8780 | */ | |
8781 | { .name = "MIDR", | |
8782 | .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY, | |
8783 | .access = PL1_R, .resetvalue = cpu->midr, | |
8784 | .writefn = arm_cp_write_ignore, .raw_writefn = raw_write, | |
8785 | .readfn = midr_read, | |
8786 | .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), | |
8787 | .type = ARM_CP_OVERRIDE }, | |
8788 | /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */ | |
8789 | { .name = "DUMMY", | |
8790 | .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY, | |
8791 | .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, | |
8792 | { .name = "DUMMY", | |
8793 | .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY, | |
8794 | .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, | |
8795 | { .name = "DUMMY", | |
8796 | .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY, | |
8797 | .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, | |
8798 | { .name = "DUMMY", | |
8799 | .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY, | |
8800 | .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, | |
8801 | { .name = "DUMMY", | |
8802 | .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY, | |
8803 | .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 }, | |
8804 | }; | |
8805 | ARMCPRegInfo id_v8_midr_cp_reginfo[] = { | |
8806 | { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH, | |
8807 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0, | |
8808 | .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr, | |
8809 | .fgt = FGT_MIDR_EL1, | |
8810 | .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid), | |
8811 | .readfn = midr_read }, | |
8812 | /* crn = 0 op1 = 0 crm = 0 op2 = 7 : AArch32 aliases of MIDR */ | |
8813 | { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, | |
8814 | .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7, | |
8815 | .access = PL1_R, .resetvalue = cpu->midr }, | |
8816 | { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH, | |
8817 | .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6, | |
8818 | .access = PL1_R, | |
8819 | .accessfn = access_aa64_tid1, | |
8820 | .fgt = FGT_REVIDR_EL1, | |
8821 | .type = ARM_CP_CONST, .resetvalue = cpu->revidr }, | |
8822 | }; | |
8823 | ARMCPRegInfo id_v8_midr_alias_cp_reginfo = { | |
8824 | .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST, | |
8825 | .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, | |
8826 | .access = PL1_R, .resetvalue = cpu->midr | |
8827 | }; | |
8828 | ARMCPRegInfo id_cp_reginfo[] = { | |
8829 | /* These are common to v8 and pre-v8 */ | |
8830 | { .name = "CTR", | |
8831 | .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1, | |
8832 | .access = PL1_R, .accessfn = ctr_el0_access, | |
8833 | .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, | |
8834 | { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64, | |
8835 | .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0, | |
8836 | .access = PL0_R, .accessfn = ctr_el0_access, | |
8837 | .fgt = FGT_CTR_EL0, | |
8838 | .type = ARM_CP_CONST, .resetvalue = cpu->ctr }, | |
8839 | /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */ | |
8840 | { .name = "TCMTR", | |
8841 | .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2, | |
8842 | .access = PL1_R, | |
8843 | .accessfn = access_aa32_tid1, | |
8844 | .type = ARM_CP_CONST, .resetvalue = 0 }, | |
8845 | }; | |
8846 | /* TLBTR is specific to VMSA */ | |
8847 | ARMCPRegInfo id_tlbtr_reginfo = { | |
8848 | .name = "TLBTR", | |
8849 | .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3, | |
8850 | .access = PL1_R, | |
8851 | .accessfn = access_aa32_tid1, | |
8852 | .type = ARM_CP_CONST, .resetvalue = 0, | |
8853 | }; | |
8854 | /* MPUIR is specific to PMSA V6+ */ | |
8855 | ARMCPRegInfo id_mpuir_reginfo = { | |
8856 | .name = "MPUIR", | |
8857 | .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4, | |
8858 | .access = PL1_R, .type = ARM_CP_CONST, | |
8859 | .resetvalue = cpu->pmsav7_dregion << 8 | |
8860 | }; | |
8861 | /* HMPUIR is specific to PMSA V8 */ | |
8862 | ARMCPRegInfo id_hmpuir_reginfo = { | |
8863 | .name = "HMPUIR", | |
8864 | .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 4, | |
8865 | .access = PL2_R, .type = ARM_CP_CONST, | |
8866 | .resetvalue = cpu->pmsav8r_hdregion | |
8867 | }; | |
8868 | static const ARMCPRegInfo crn0_wi_reginfo = { | |
8869 | .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY, | |
8870 | .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W, | |
8871 | .type = ARM_CP_NOP | ARM_CP_OVERRIDE | |
8872 | }; | |
8873 | #ifdef CONFIG_USER_ONLY | |
8874 | static const ARMCPRegUserSpaceInfo id_v8_user_midr_cp_reginfo[] = { | |
8875 | { .name = "MIDR_EL1", | |
8876 | .exported_bits = R_MIDR_EL1_REVISION_MASK | | |
8877 | R_MIDR_EL1_PARTNUM_MASK | | |
8878 | R_MIDR_EL1_ARCHITECTURE_MASK | | |
8879 | R_MIDR_EL1_VARIANT_MASK | | |
8880 | R_MIDR_EL1_IMPLEMENTER_MASK }, | |
8881 | { .name = "REVIDR_EL1" }, | |
8882 | }; | |
8883 | modify_arm_cp_regs(id_v8_midr_cp_reginfo, id_v8_user_midr_cp_reginfo); | |
8884 | #endif | |
8885 | if (arm_feature(env, ARM_FEATURE_OMAPCP) || | |
8886 | arm_feature(env, ARM_FEATURE_STRONGARM)) { | |
8887 | size_t i; | |
8888 | /* | |
8889 | * Register the blanket "writes ignored" value first to cover the | |
8890 | * whole space. Then update the specific ID registers to allow write | |
8891 | * access, so that they ignore writes rather than causing them to | |
8892 | * UNDEF. | |
8893 | */ | |
8894 | define_one_arm_cp_reg(cpu, &crn0_wi_reginfo); | |
8895 | for (i = 0; i < ARRAY_SIZE(id_pre_v8_midr_cp_reginfo); ++i) { | |
8896 | id_pre_v8_midr_cp_reginfo[i].access = PL1_RW; | |
8897 | } | |
8898 | for (i = 0; i < ARRAY_SIZE(id_cp_reginfo); ++i) { | |
8899 | id_cp_reginfo[i].access = PL1_RW; | |
8900 | } | |
8901 | id_mpuir_reginfo.access = PL1_RW; | |
8902 | id_tlbtr_reginfo.access = PL1_RW; | |
8903 | } | |
8904 | if (arm_feature(env, ARM_FEATURE_V8)) { | |
8905 | define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo); | |
8906 | if (!arm_feature(env, ARM_FEATURE_PMSA)) { | |
8907 | define_one_arm_cp_reg(cpu, &id_v8_midr_alias_cp_reginfo); | |
8908 | } | |
8909 | } else { | |
8910 | define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo); | |
8911 | } | |
8912 | define_arm_cp_regs(cpu, id_cp_reginfo); | |
8913 | if (!arm_feature(env, ARM_FEATURE_PMSA)) { | |
8914 | define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo); | |
8915 | } else if (arm_feature(env, ARM_FEATURE_PMSA) && | |
8916 | arm_feature(env, ARM_FEATURE_V8)) { | |
8917 | uint32_t i = 0; | |
8918 | char *tmp_string; | |
8919 | ||
8920 | define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); | |
8921 | define_one_arm_cp_reg(cpu, &id_hmpuir_reginfo); | |
8922 | define_arm_cp_regs(cpu, pmsav8r_cp_reginfo); | |
8923 | ||
8924 | /* Register aliases are only defined for the first 32 region indexes */ | |
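| /* | |
| * The alias encoding packs region index i into the coprocessor | |
| * coordinates: CRm = 0b1000 | i[3:1], opc1 = i[4] (the HYP aliases | |
| * below use 0b100 | i[4]), and opc2 = i[0] << 2 for PRBAR<n>, with | |
| * PRLAR<n> additionally setting opc2 bit 0. | |
| */ | |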
8925 | for (i = 0; i < MIN(cpu->pmsav7_dregion, 32); ++i) { | |
8926 | uint8_t crm = 0b1000 | extract32(i, 1, 3); | |
8927 | uint8_t opc1 = extract32(i, 4, 1); | |
8928 | uint8_t opc2 = extract32(i, 0, 1) << 2; | |
8929 | ||
8930 | tmp_string = g_strdup_printf("PRBAR%u", i); | |
8931 | ARMCPRegInfo tmp_prbarn_reginfo = { | |
8932 | .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW, | |
8933 | .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, | |
8934 | .access = PL1_RW, .resetvalue = 0, | |
8935 | .accessfn = access_tvm_trvm, | |
8936 | .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read | |
8937 | }; | |
8938 | define_one_arm_cp_reg(cpu, &tmp_prbarn_reginfo); | |
8939 | g_free(tmp_string); | |
8940 | ||
8941 | opc2 = extract32(i, 0, 1) << 2 | 0x1; | |
8942 | tmp_string = g_strdup_printf("PRLAR%u", i); | |
8943 | ARMCPRegInfo tmp_prlarn_reginfo = { | |
8944 | .name = tmp_string, .type = ARM_CP_ALIAS | ARM_CP_NO_RAW, | |
8945 | .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, | |
8946 | .access = PL1_RW, .resetvalue = 0, | |
8947 | .accessfn = access_tvm_trvm, | |
8948 | .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read | |
8949 | }; | |
8950 | define_one_arm_cp_reg(cpu, &tmp_prlarn_reginfo); | |
8951 | g_free(tmp_string); | |
8952 | } | |
8953 | ||
8954 | /* Register aliases are only defined for the first 32 region indexes */ | |
8955 | for (i = 0; i < MIN(cpu->pmsav8r_hdregion, 32); ++i) { | |
8956 | uint8_t crm = 0b1000 | extract32(i, 1, 3); | |
8957 | uint8_t opc1 = 0b100 | extract32(i, 4, 1); | |
8958 | uint8_t opc2 = extract32(i, 0, 1) << 2; | |
8959 | ||
8960 | tmp_string = g_strdup_printf("HPRBAR%u", i); | |
8961 | ARMCPRegInfo tmp_hprbarn_reginfo = { | |
8962 | .name = tmp_string, | |
8963 | .type = ARM_CP_NO_RAW, | |
8964 | .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, | |
8965 | .access = PL2_RW, .resetvalue = 0, | |
8966 | .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read | |
8967 | }; | |
8968 | define_one_arm_cp_reg(cpu, &tmp_hprbarn_reginfo); | |
8969 | g_free(tmp_string); | |
8970 | ||
8971 | opc2 = extract32(i, 0, 1) << 2 | 0x1; | |
8972 | tmp_string = g_strdup_printf("HPRLAR%u", i); | |
8973 | ARMCPRegInfo tmp_hprlarn_reginfo = { | |
8974 | .name = tmp_string, | |
8975 | .type = ARM_CP_NO_RAW, | |
8976 | .cp = 15, .opc1 = opc1, .crn = 6, .crm = crm, .opc2 = opc2, | |
8977 | .access = PL2_RW, .resetvalue = 0, | |
8978 | .writefn = pmsav8r_regn_write, .readfn = pmsav8r_regn_read | |
8979 | }; | |
8980 | define_one_arm_cp_reg(cpu, &tmp_hprlarn_reginfo); | |
8981 | g_free(tmp_string); | |
8982 | } | |
8983 | } else if (arm_feature(env, ARM_FEATURE_V7)) { | |
8984 | define_one_arm_cp_reg(cpu, &id_mpuir_reginfo); | |
8985 | } | |
8986 | } | |
8987 | ||
8988 | if (arm_feature(env, ARM_FEATURE_MPIDR)) { | |
8989 | ARMCPRegInfo mpidr_cp_reginfo[] = { | |
8990 | { .name = "MPIDR_EL1", .state = ARM_CP_STATE_BOTH, | |
8991 | .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5, | |
8992 | .fgt = FGT_MPIDR_EL1, | |
8993 | .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW }, | |
8994 | }; | |
8995 | #ifdef CONFIG_USER_ONLY | |
8996 | static const ARMCPRegUserSpaceInfo mpidr_user_cp_reginfo[] = { | |
8997 | { .name = "MPIDR_EL1", | |
8998 | .fixed_bits = 0x0000000080000000 }, | |
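| /* Bit 31 of MPIDR_EL1 is RES1, hence the fixed 0x80000000. */ | |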
8999 | }; | |
9000 | modify_arm_cp_regs(mpidr_cp_reginfo, mpidr_user_cp_reginfo); | |
9001 | #endif | |
9002 | define_arm_cp_regs(cpu, mpidr_cp_reginfo); | |
9003 | } | |
9004 | ||
9005 | if (arm_feature(env, ARM_FEATURE_AUXCR)) { | |
9006 | ARMCPRegInfo auxcr_reginfo[] = { | |
9007 | { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, | |
9008 | .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, | |
9009 | .access = PL1_RW, .accessfn = access_tacr, | |
9010 | .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr }, | |
9011 | { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, | |
9012 | .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, | |
9013 | .access = PL2_RW, .type = ARM_CP_CONST, | |
9014 | .resetvalue = 0 }, | |
9015 | { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64, | |
9016 | .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1, | |
9017 | .access = PL3_RW, .type = ARM_CP_CONST, | |
9018 | .resetvalue = 0 }, | |
9019 | }; | |
9020 | define_arm_cp_regs(cpu, auxcr_reginfo); | |
9021 | if (cpu_isar_feature(aa32_ac2, cpu)) { | |
9022 | define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo); | |
9023 | } | |
9024 | } | |
9025 | ||
9026 | if (arm_feature(env, ARM_FEATURE_CBAR)) { | |
9027 | /* | |
9028 | * CBAR is IMPDEF, but common on Arm Cortex-A implementations. | |
9029 | * There are two flavours: | |
9030 | * (1) older 32-bit only cores have a simple 32-bit CBAR | |
9031 | * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a | |
9032 | * 32-bit register visible to AArch32 at a different encoding | |
9033 | * to the "flavour 1" register and with the bits rearranged to | |
9034 | * be able to squash a 64-bit address into the 32-bit view. | |
9035 | * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but | |
9036 | * in future if we support AArch32-only configs of some of the | |
9037 | * AArch64 cores we might need to add a specific feature flag | |
9038 | * to indicate cores with "flavour 2" CBAR. | |
9039 | */ | |
9040 | if (arm_feature(env, ARM_FEATURE_AARCH64)) { | |
9041 | /* 32 bit view is [31:18] 0...0 [43:32]. */ | |
9042 | uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18) | |
9043 | | extract64(cpu->reset_cbar, 32, 12); | |
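| /* | |
| * Illustrative value only: a reset_cbar of 0x30_2f000000 gives | |
| * cbar32 = 0x2f000000 | 0x030 = 0x2f000030, i.e. address bits [31:18] | |
| * stay in place and bits [43:32] land in the low 12 bits. | |
| */ | |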
9044 | ARMCPRegInfo cbar_reginfo[] = { | |
9045 | { .name = "CBAR", | |
9046 | .type = ARM_CP_CONST, | |
9047 | .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0, | |
9048 | .access = PL1_R, .resetvalue = cbar32 }, | |
9049 | { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64, | |
9050 | .type = ARM_CP_CONST, | |
9051 | .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0, | |
9052 | .access = PL1_R, .resetvalue = cpu->reset_cbar }, | |
9053 | }; | |
9054 | /* We don't currently implement a read/write 64-bit CBAR */ | |
9055 | assert(arm_feature(env, ARM_FEATURE_CBAR_RO)); | |
9056 | define_arm_cp_regs(cpu, cbar_reginfo); | |
9057 | } else { | |
9058 | ARMCPRegInfo cbar = { | |
9059 | .name = "CBAR", | |
9060 | .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0, | |
9061 | .access = PL1_R | PL3_W, .resetvalue = cpu->reset_cbar, | |
9062 | .fieldoffset = offsetof(CPUARMState, | |
9063 | cp15.c15_config_base_address) | |
9064 | }; | |
9065 | if (arm_feature(env, ARM_FEATURE_CBAR_RO)) { | |
9066 | cbar.access = PL1_R; | |
9067 | cbar.fieldoffset = 0; | |
9068 | cbar.type = ARM_CP_CONST; | |
9069 | } | |
9070 | define_one_arm_cp_reg(cpu, &cbar); | |
9071 | } | |
9072 | } | |
9073 | ||
9074 | if (arm_feature(env, ARM_FEATURE_VBAR)) { | |
9075 | static const ARMCPRegInfo vbar_cp_reginfo[] = { | |
9076 | { .name = "VBAR", .state = ARM_CP_STATE_BOTH, | |
9077 | .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, | |
9078 | .access = PL1_RW, .writefn = vbar_write, | |
9079 | .fgt = FGT_VBAR_EL1, | |
9080 | .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), | |
9081 | offsetof(CPUARMState, cp15.vbar_ns) }, | |
9082 | .resetvalue = 0 }, | |
9083 | }; | |
9084 | define_arm_cp_regs(cpu, vbar_cp_reginfo); | |
9085 | } | |
9086 | ||
9087 | /* Generic registers whose values depend on the implementation */ | |
9088 | { | |
9089 | ARMCPRegInfo sctlr = { | |
9090 | .name = "SCTLR", .state = ARM_CP_STATE_BOTH, | |
9091 | .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, | |
9092 | .access = PL1_RW, .accessfn = access_tvm_trvm, | |
9093 | .fgt = FGT_SCTLR_EL1, | |
9094 | .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), | |
9095 | offsetof(CPUARMState, cp15.sctlr_ns) }, | |
9096 | .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, | |
9097 | .raw_writefn = raw_write, | |
9098 | }; | |
9099 | if (arm_feature(env, ARM_FEATURE_XSCALE)) { | |
9100 | /* | |
9101 | * Normally we would always end the TB on an SCTLR write, but Linux | |
9102 | * arch/arm/mach-pxa/sleep.S expects two instructions following | |
9103 | * an MMU enable to execute from cache. Imitate this behaviour. | |
9104 | */ | |
9105 | sctlr.type |= ARM_CP_SUPPRESS_TB_END; | |
9106 | } | |
9107 | define_one_arm_cp_reg(cpu, &sctlr); | |
9108 | ||
9109 | if (arm_feature(env, ARM_FEATURE_PMSA) && | |
9110 | arm_feature(env, ARM_FEATURE_V8)) { | |
9111 | ARMCPRegInfo vsctlr = { | |
9112 | .name = "VSCTLR", .state = ARM_CP_STATE_AA32, | |
9113 | .cp = 15, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, | |
9114 | .access = PL2_RW, .resetvalue = 0x0, | |
9115 | .fieldoffset = offsetoflow32(CPUARMState, cp15.vsctlr), | |
9116 | }; | |
9117 | define_one_arm_cp_reg(cpu, &vsctlr); | |
9118 | } | |
9119 | } | |
9120 | ||
9121 | if (cpu_isar_feature(aa64_lor, cpu)) { | |
9122 | define_arm_cp_regs(cpu, lor_reginfo); | |
9123 | } | |
9124 | if (cpu_isar_feature(aa64_pan, cpu)) { | |
9125 | define_one_arm_cp_reg(cpu, &pan_reginfo); | |
9126 | } | |
9127 | #ifndef CONFIG_USER_ONLY | |
9128 | if (cpu_isar_feature(aa64_ats1e1, cpu)) { | |
9129 | define_arm_cp_regs(cpu, ats1e1_reginfo); | |
9130 | } | |
9131 | if (cpu_isar_feature(aa32_ats1e1, cpu)) { | |
9132 | define_arm_cp_regs(cpu, ats1cp_reginfo); | |
9133 | } | |
9134 | #endif | |
9135 | if (cpu_isar_feature(aa64_uao, cpu)) { | |
9136 | define_one_arm_cp_reg(cpu, &uao_reginfo); | |
9137 | } | |
9138 | ||
9139 | if (cpu_isar_feature(aa64_dit, cpu)) { | |
9140 | define_one_arm_cp_reg(cpu, &dit_reginfo); | |
9141 | } | |
9142 | if (cpu_isar_feature(aa64_ssbs, cpu)) { | |
9143 | define_one_arm_cp_reg(cpu, &ssbs_reginfo); | |
9144 | } | |
9145 | if (cpu_isar_feature(any_ras, cpu)) { | |
9146 | define_arm_cp_regs(cpu, minimal_ras_reginfo); | |
9147 | } | |
9148 | ||
9149 | if (cpu_isar_feature(aa64_vh, cpu) || | |
9150 | cpu_isar_feature(aa64_debugv8p2, cpu)) { | |
9151 | define_one_arm_cp_reg(cpu, &contextidr_el2); | |
9152 | } | |
9153 | if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { | |
9154 | define_arm_cp_regs(cpu, vhe_reginfo); | |
9155 | } | |
9156 | ||
9157 | if (cpu_isar_feature(aa64_sve, cpu)) { | |
9158 | define_arm_cp_regs(cpu, zcr_reginfo); | |
9159 | } | |
9160 | ||
9161 | if (cpu_isar_feature(aa64_hcx, cpu)) { | |
9162 | define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo); | |
9163 | } | |
9164 | ||
9165 | #ifdef TARGET_AARCH64 | |
9166 | if (cpu_isar_feature(aa64_sme, cpu)) { | |
9167 | define_arm_cp_regs(cpu, sme_reginfo); | |
9168 | } | |
9169 | if (cpu_isar_feature(aa64_pauth, cpu)) { | |
9170 | define_arm_cp_regs(cpu, pauth_reginfo); | |
9171 | } | |
9172 | if (cpu_isar_feature(aa64_rndr, cpu)) { | |
9173 | define_arm_cp_regs(cpu, rndr_reginfo); | |
9174 | } | |
9175 | if (cpu_isar_feature(aa64_tlbirange, cpu)) { | |
9176 | define_arm_cp_regs(cpu, tlbirange_reginfo); | |
9177 | } | |
9178 | if (cpu_isar_feature(aa64_tlbios, cpu)) { | |
9179 | define_arm_cp_regs(cpu, tlbios_reginfo); | |
9180 | } | |
9181 | /* Data Cache clean instructions up to PoP */ | |
9182 | if (cpu_isar_feature(aa64_dcpop, cpu)) { | |
9183 | define_one_arm_cp_reg(cpu, dcpop_reg); | |
9184 | ||
9185 | if (cpu_isar_feature(aa64_dcpodp, cpu)) { | |
9186 | define_one_arm_cp_reg(cpu, dcpodp_reg); | |
9187 | } | |
9188 | } | |
9189 | ||
9190 | /* | |
9191 | * If full MTE is enabled, add all of the system registers. | |
9192 | * If only "instructions available at EL0" are enabled, | |
9193 | * then define only a RAZ/WI version of PSTATE.TCO. | |
9194 | */ | |
9195 | if (cpu_isar_feature(aa64_mte, cpu)) { | |
9196 | define_arm_cp_regs(cpu, mte_reginfo); | |
9197 | define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo); | |
9198 | } else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) { | |
9199 | define_arm_cp_regs(cpu, mte_tco_ro_reginfo); | |
9200 | define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo); | |
9201 | } | |
9202 | ||
9203 | if (cpu_isar_feature(aa64_scxtnum, cpu)) { | |
9204 | define_arm_cp_regs(cpu, scxtnum_reginfo); | |
9205 | } | |
9206 | ||
9207 | if (cpu_isar_feature(aa64_fgt, cpu)) { | |
9208 | define_arm_cp_regs(cpu, fgt_reginfo); | |
9209 | } | |
9210 | ||
9211 | if (cpu_isar_feature(aa64_rme, cpu)) { | |
9212 | define_arm_cp_regs(cpu, rme_reginfo); | |
9213 | if (cpu_isar_feature(aa64_mte, cpu)) { | |
9214 | define_arm_cp_regs(cpu, rme_mte_reginfo); | |
9215 | } | |
9216 | } | |
9217 | #endif | |
9218 | ||
9219 | if (cpu_isar_feature(any_predinv, cpu)) { | |
9220 | define_arm_cp_regs(cpu, predinv_reginfo); | |
9221 | } | |
9222 | ||
9223 | if (cpu_isar_feature(any_ccidx, cpu)) { | |
9224 | define_arm_cp_regs(cpu, ccsidr2_reginfo); | |
9225 | } | |
9226 | ||
9227 | #ifndef CONFIG_USER_ONLY | |
9228 | /* | |
9229 | * Register redirections and aliases must be done last, | |
9230 | * after the registers from the other extensions have been defined. | |
9231 | */ | |
9232 | if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { | |
9233 | define_arm_vh_e2h_redirects_aliases(cpu); | |
9234 | } | |
9235 | #endif | |
9236 | } | |
9237 | ||
9238 | /* Sort alphabetically by type name, except for "any". */ | |
9239 | static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) | |
9240 | { | |
9241 | ObjectClass *class_a = (ObjectClass *)a; | |
9242 | ObjectClass *class_b = (ObjectClass *)b; | |
9243 | const char *name_a, *name_b; | |
9244 | ||
9245 | name_a = object_class_get_name(class_a); | |
9246 | name_b = object_class_get_name(class_b); | |
9247 | if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { | |
9248 | return 1; | |
9249 | } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { | |
9250 | return -1; | |
9251 | } else { | |
9252 | return strcmp(name_a, name_b); | |
9253 | } | |
9254 | } | |
9255 | ||
9256 | static void arm_cpu_list_entry(gpointer data, gpointer user_data) | |
9257 | { | |
9258 | ObjectClass *oc = data; | |
9259 | CPUClass *cc = CPU_CLASS(oc); | |
9260 | const char *typename; | |
9261 | char *name; | |
9262 | ||
9263 | typename = object_class_get_name(oc); | |
9264 | name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); | |
9265 | if (cc->deprecation_note) { | |
9266 | qemu_printf(" %s (deprecated)\n", name); | |
9267 | } else { | |
9268 | qemu_printf(" %s\n", name); | |
9269 | } | |
9270 | g_free(name); | |
9271 | } | |
9272 | ||
9273 | void arm_cpu_list(void) | |
9274 | { | |
9275 | GSList *list; | |
9276 | ||
9277 | list = object_class_get_list(TYPE_ARM_CPU, false); | |
9278 | list = g_slist_sort(list, arm_cpu_list_compare); | |
9279 | qemu_printf("Available CPUs:\n"); | |
9280 | g_slist_foreach(list, arm_cpu_list_entry, NULL); | |
9281 | g_slist_free(list); | |
9282 | } | |
9283 | ||
9284 | /* | |
9285 | * Private utility function for define_one_arm_cp_reg_with_opaque(): | |
9286 | * add a single reginfo struct to the hash table. | |
9287 | */ | |
9288 | static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, | |
9289 | void *opaque, CPState state, | |
9290 | CPSecureState secstate, | |
9291 | int crm, int opc1, int opc2, | |
9292 | const char *name) | |
9293 | { | |
9294 | CPUARMState *env = &cpu->env; | |
9295 | uint32_t key; | |
9296 | ARMCPRegInfo *r2; | |
9297 | bool is64 = r->type & ARM_CP_64BIT; | |
9298 | bool ns = secstate & ARM_CP_SECSTATE_NS; | |
9299 | int cp = r->cp; | |
9300 | size_t name_len; | |
9301 | bool make_const; | |
9302 | ||
9303 | switch (state) { | |
9304 | case ARM_CP_STATE_AA32: | |
9305 | /* We assume it is a cp15 register if the .cp field is left unset. */ | |
9306 | if (cp == 0 && r->state == ARM_CP_STATE_BOTH) { | |
9307 | cp = 15; | |
9308 | } | |
9309 | key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2); | |
9310 | break; | |
9311 | case ARM_CP_STATE_AA64: | |
9312 | /* | |
9313 | * To allow abbreviation of ARMCPRegInfo definitions, we treat | |
9314 | * cp == 0 as equivalent to the value for "standard guest-visible | |
9315 | * sysreg". STATE_BOTH definitions are also always "standard sysreg" | |
9316 | * in their AArch64 view (the .cp value may be non-zero for the | |
9317 | * benefit of the AArch32 view). | |
9318 | */ | |
9319 | if (cp == 0 || r->state == ARM_CP_STATE_BOTH) { | |
9320 | cp = CP_REG_ARM64_SYSREG_CP; | |
9321 | } | |
9322 | key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2); | |
9323 | break; | |
9324 | default: | |
9325 | g_assert_not_reached(); | |
9326 | } | |
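| /* | |
| * Either way, the register's coordinates (plus, for AArch32, its | |
| * width and security state) are now folded into a single integer key | |
| * used to index the cpu->cp_regs hash table. | |
| */ | |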
9327 | ||
9328 | /* Overriding of an existing definition must be explicitly requested. */ | |
9329 | if (!(r->type & ARM_CP_OVERRIDE)) { | |
9330 | const ARMCPRegInfo *oldreg = get_arm_cp_reginfo(cpu->cp_regs, key); | |
9331 | if (oldreg) { | |
9332 | assert(oldreg->type & ARM_CP_OVERRIDE); | |
9333 | } | |
9334 | } | |
9335 | ||
9336 | /* | |
9337 | * Eliminate registers that are not present because the EL is missing. | |
9338 | * Doing this here makes it easier to put all registers for a given | |
9339 | * feature into the same ARMCPRegInfo array and define them all at once. | |
9340 | */ | |
9341 | make_const = false; | |
9342 | if (arm_feature(env, ARM_FEATURE_EL3)) { | |
9343 | /* | |
9344 | * An EL2 register without EL2 but with EL3 is (usually) RES0. | |
9345 | * See rule RJFFP in section D1.1.3 of DDI0487H.a. | |
9346 | */ | |
9347 | int min_el = ctz32(r->access) / 2; | |
9348 | if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) { | |
9349 | if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) { | |
9350 | return; | |
9351 | } | |
9352 | make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP); | |
9353 | } | |
9354 | } else { | |
9355 | CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2) | |
9356 | ? PL2_RW : PL1_RW); | |
9357 | if ((r->access & max_el) == 0) { | |
9358 | return; | |
9359 | } | |
9360 | } | |
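| /* | |
| * For example, a register whose .access only permits PL2 accesses is | |
| * simply not defined at all on a CPU with neither EL2 nor EL3, rather | |
| * than being turned into a constant. | |
| */ | |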
9361 | ||
9362 | /* Combine cpreg and name into one allocation. */ | |
9363 | name_len = strlen(name) + 1; | |
9364 | r2 = g_malloc(sizeof(*r2) + name_len); | |
9365 | *r2 = *r; | |
9366 | r2->name = memcpy(r2 + 1, name, name_len); | |
9367 | ||
9368 | /* | |
9369 | * Update fields to match the instantiation, overwriting wildcards | |
9370 | * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH. | |
9371 | */ | |
9372 | r2->cp = cp; | |
9373 | r2->crm = crm; | |
9374 | r2->opc1 = opc1; | |
9375 | r2->opc2 = opc2; | |
9376 | r2->state = state; | |
9377 | r2->secure = secstate; | |
9378 | if (opaque) { | |
9379 | r2->opaque = opaque; | |
9380 | } | |
9381 | ||
9382 | if (make_const) { | |
9383 | /* This should not have been a very special register to begin with. */ | |
9384 | int old_special = r2->type & ARM_CP_SPECIAL_MASK; | |
9385 | assert(old_special == 0 || old_special == ARM_CP_NOP); | |
9386 | /* | |
9387 | * Set the special function to CONST, retaining the other flags. | |
9388 | * This is important for e.g. ARM_CP_SVE so that we still | |
9389 | * take the SVE trap if CPTR_EL3.EZ == 0. | |
9390 | */ | |
9391 | r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST; | |
9392 | /* | |
9393 | * Usually, these registers become RES0, but there are a few | |
9394 | * special cases like VPIDR_EL2 which have a constant non-zero | |
9395 | * value with writes ignored. | |
9396 | */ | |
9397 | if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) { | |
9398 | r2->resetvalue = 0; | |
9399 | } | |
9400 | /* | |
9401 | * ARM_CP_CONST has precedence, so removing the callbacks and | |
9402 | * offsets is not strictly necessary, but it is potentially | |
9403 | * less confusing to debug later. | |
9404 | */ | |
9405 | r2->readfn = NULL; | |
9406 | r2->writefn = NULL; | |
9407 | r2->raw_readfn = NULL; | |
9408 | r2->raw_writefn = NULL; | |
9409 | r2->resetfn = NULL; | |
9410 | r2->fieldoffset = 0; | |
9411 | r2->bank_fieldoffsets[0] = 0; | |
9412 | r2->bank_fieldoffsets[1] = 0; | |
9413 | } else { | |
9414 | bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]; | |
9415 | ||
9416 | if (isbanked) { | |
9417 | /* | |
9418 | * Register is banked (using both entries in array). | |
9419 | * Overwrite fieldoffset: the bank_fieldoffsets array is only used | |
9420 | * to define banked registers; afterwards only fieldoffset is used. | |
9421 | */ | |
9422 | r2->fieldoffset = r->bank_fieldoffsets[ns]; | |
9423 | } | |
9424 | if (state == ARM_CP_STATE_AA32) { | |
9425 | if (isbanked) { | |
9426 | /* | |
9427 | * If the register is banked then we don't need to migrate or | |
9428 | * reset the 32-bit instance in certain cases: | |
9429 | * | |
9430 | * 1) If the register has both 32-bit and 64-bit instances | |
9431 | * then we can count on the 64-bit instance taking care | |
9432 | * of the non-secure bank. | |
9433 | * 2) If ARMv8 is enabled then we can count on a 64-bit | |
9434 | * version taking care of the secure bank. This requires | |
9435 | * that separate 32 and 64-bit definitions are provided. | |
9436 | */ | |
9437 | if ((r->state == ARM_CP_STATE_BOTH && ns) || | |
9438 | (arm_feature(env, ARM_FEATURE_V8) && !ns)) { | |
9439 | r2->type |= ARM_CP_ALIAS; | |
9440 | } | |
9441 | } else if ((secstate != r->secure) && !ns) { | |
9442 | /* | |
9443 | * The register is not banked so we only want to allow | |
9444 | * migration of the non-secure instance. | |
9445 | */ | |
9446 | r2->type |= ARM_CP_ALIAS; | |
9447 | } | |
9448 | ||
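| /* | |
| * On a big-endian host the low 32 bits of a 64-bit state field live | |
| * at the higher address, so the AArch32 view of a STATE_BOTH register | |
| * needs its offset bumped to keep pointing at the low half. | |
| */ | |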
9449 | if (HOST_BIG_ENDIAN && | |
9450 | r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) { | |
9451 | r2->fieldoffset += sizeof(uint32_t); | |
9452 | } | |
9453 | } | |
9454 | } | |
9455 | ||
9456 | /* | |
9457 | * By convention, for wildcarded registers only the first | |
9458 | * entry is used for migration; the others are marked as | |
9459 | * ALIAS so we don't try to transfer the register | |
9460 | * multiple times. Special registers (ie NOP/WFI) are | |
9461 | * never migratable and not even raw-accessible. | |
9462 | */ | |
9463 | if (r2->type & ARM_CP_SPECIAL_MASK) { | |
9464 | r2->type |= ARM_CP_NO_RAW; | |
9465 | } | |
9466 | if (((r->crm == CP_ANY) && crm != 0) || | |
9467 | ((r->opc1 == CP_ANY) && opc1 != 0) || | |
9468 | ((r->opc2 == CP_ANY) && opc2 != 0)) { | |
9469 | r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; | |
9470 | } | |
9471 | ||
9472 | /* | |
9473 | * Check that raw accesses are either forbidden or handled. Note that | |
9474 | * we can't assert this earlier because the setup of fieldoffset for | |
9475 | * banked registers has to be done first. | |
9476 | */ | |
9477 | if (!(r2->type & ARM_CP_NO_RAW)) { | |
9478 | assert(!raw_accessors_invalid(r2)); | |
9479 | } | |
9480 | ||
9481 | g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2); | |
9482 | } | |
9483 | ||
9484 | ||
9485 | void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, | |
9486 | const ARMCPRegInfo *r, void *opaque) | |
9487 | { | |
9488 | /* | |
9489 | * Define implementations of coprocessor registers. | |
9490 | * We store these in a hashtable because typically | |
9491 | * there are fewer than 150 registers in a space which | |
9492 | * is 16*16*16*8*8 = 262144 in size. | |
9493 | * Wildcarding is supported for the crm, opc1 and opc2 fields. | |
9494 | * If a register is defined twice then the second definition is | |
9495 | * used, so this can be used to define some generic registers and | |
9496 | * then override them with implementation specific variations. | |
9497 | * At least one of the original and the second definition should | |
9498 | * include ARM_CP_OVERRIDE in its type bits -- this is just a guard | |
9499 | * against accidental use. | |
9500 | * | |
9501 | * The state field defines whether the register is to be | |
9502 | * visible in the AArch32 or AArch64 execution state. If the | |
9503 | * state is set to ARM_CP_STATE_BOTH then we synthesise a | |
9504 | * reginfo structure for the AArch32 view, which sees the lower | |
9505 | * 32 bits of the 64 bit register. | |
9506 | * | |
9507 | * Only registers visible in AArch64 may set r->opc0; opc0 cannot | |
9508 | * be wildcarded. AArch64 registers are always considered to be 64 | |
9509 | * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of | |
9510 | * the register, if any. | |
9511 | */ | |
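/*
 * Purely as an illustration (this is a hypothetical reginfo, not one
 * defined elsewhere in this file): a wildcarded definition such as
 *   { .name = "FOO", .state = ARM_CP_STATE_BOTH, .cp = 15, .crn = 1,
 *     .crm = CP_ANY, .opc1 = 0, .opc2 = CP_ANY,
 *     .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 }
 * is expanded by the loops below into one hashtable entry per concrete
 * crm/opc2 value (16 * 8 combinations) for each execution state; only
 * the first expansion stays migratable, the rest are marked
 * ARM_CP_ALIAS | ARM_CP_NO_GDB by add_cpreg_to_hashtable().
 */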
9512 | int crm, opc1, opc2; | |
9513 | int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; | |
9514 | int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; | |
9515 | int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; | |
9516 | int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; | |
9517 | int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; | |
9518 | int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; | |
9519 | CPState state; | |
9520 | ||
9521 | /* 64 bit registers have only CRm and Opc1 fields */ | |
9522 | assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); | |
9523 | /* op0 only exists in the AArch64 encodings */ | |
9524 | assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); | |
9525 | /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ | |
9526 | assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); | |
9527 | /* | |
9528 | * This API is only for Arm's system coprocessors (14 and 15) or | |
9529 | * (M-profile or v7A-and-earlier only) for implementation defined | |
9530 | * coprocessors in the range 0..7. Our decode assumes this, since | |
9531 | * 8..13 can be used for other insns including VFP and Neon. See | |
9532 | * valid_cp() in translate.c. Assert here that we haven't tried | |
9533 | * to use an invalid coprocessor number. | |
9534 | */ | |
9535 | switch (r->state) { | |
9536 | case ARM_CP_STATE_BOTH: | |
9537 | /* 0 has a special meaning, but otherwise the same rules as AA32. */ | |
9538 | if (r->cp == 0) { | |
9539 | break; | |
9540 | } | |
9541 | /* fall through */ | |
9542 | case ARM_CP_STATE_AA32: | |
9543 | if (arm_feature(&cpu->env, ARM_FEATURE_V8) && | |
9544 | !arm_feature(&cpu->env, ARM_FEATURE_M)) { | |
9545 | assert(r->cp >= 14 && r->cp <= 15); | |
9546 | } else { | |
9547 | assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15)); | |
9548 | } | |
9549 | break; | |
9550 | case ARM_CP_STATE_AA64: | |
9551 | assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP); | |
9552 | break; | |
9553 | default: | |
9554 | g_assert_not_reached(); | |
9555 | } | |
9556 | /* | |
9557 | * The AArch64 pseudocode CheckSystemAccess() specifies that op1 | |
9558 | * encodes a minimum access level for the register. We roll this | |
9559 | * runtime check into our general permission check code, so check | |
9560 | * here that the reginfo's specified permissions are strict enough | |
9561 | * to encompass the generic architectural permission check. | |
9562 | */ | |
9563 | if (r->state != ARM_CP_STATE_AA32) { | |
9564 | CPAccessRights mask; | |
9565 | switch (r->opc1) { | |
9566 | case 0: | |
9567 | /* min_EL EL1, but some accessible to EL0 via kernel ABI */ | |
9568 | mask = PL0U_R | PL1_RW; | |
9569 | break; | |
9570 | case 1: case 2: | |
9571 | /* min_EL EL1 */ | |
9572 | mask = PL1_RW; | |
9573 | break; | |
9574 | case 3: | |
9575 | /* min_EL EL0 */ | |
9576 | mask = PL0_RW; | |
9577 | break; | |
9578 | case 4: | |
9579 | case 5: | |
9580 | /* min_EL EL2 */ | |
9581 | mask = PL2_RW; | |
9582 | break; | |
9583 | case 6: | |
9584 | /* min_EL EL3 */ | |
9585 | mask = PL3_RW; | |
9586 | break; | |
9587 | case 7: | |
9588 | /* min_EL EL1, secure mode only (we don't check the latter) */ | |
9589 | mask = PL1_RW; | |
9590 | break; | |
9591 | default: | |
9592 | /* broken reginfo with out-of-range opc1 */ | |
9593 | g_assert_not_reached(); | |
9594 | } | |
9595 | /* assert our permissions are not too lax (stricter is fine) */ | |
9596 | assert((r->access & ~mask) == 0); | |
9597 | } | |
9598 | ||
9599 | /* | |
9600 | * Check that the register definition has enough info to handle | |
9601 | * reads and writes if they are permitted. | |
9602 | */ | |
9603 | if (!(r->type & (ARM_CP_SPECIAL_MASK | ARM_CP_CONST))) { | |
9604 | if (r->access & PL3_R) { | |
9605 | assert((r->fieldoffset || | |
9606 | (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || | |
9607 | r->readfn); | |
9608 | } | |
9609 | if (r->access & PL3_W) { | |
9610 | assert((r->fieldoffset || | |
9611 | (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) || | |
9612 | r->writefn); | |
9613 | } | |
9614 | } | |
9615 | ||
9616 | for (crm = crmmin; crm <= crmmax; crm++) { | |
9617 | for (opc1 = opc1min; opc1 <= opc1max; opc1++) { | |
9618 | for (opc2 = opc2min; opc2 <= opc2max; opc2++) { | |
9619 | for (state = ARM_CP_STATE_AA32; | |
9620 | state <= ARM_CP_STATE_AA64; state++) { | |
9621 | if (r->state != state && r->state != ARM_CP_STATE_BOTH) { | |
9622 | continue; | |
9623 | } | |
9624 | if (state == ARM_CP_STATE_AA32) { | |
9625 | /* | |
9626 | * Under AArch32 CP registers can be common | |
9627 | * (same for secure and non-secure world) or banked. | |
9628 | */ | |
9629 | char *name; | |
9630 | ||
9631 | switch (r->secure) { | |
9632 | case ARM_CP_SECSTATE_S: | |
9633 | case ARM_CP_SECSTATE_NS: | |
9634 | add_cpreg_to_hashtable(cpu, r, opaque, state, | |
9635 | r->secure, crm, opc1, opc2, | |
9636 | r->name); | |
9637 | break; | |
9638 | case ARM_CP_SECSTATE_BOTH: | |
9639 | name = g_strdup_printf("%s_S", r->name); | |
9640 | add_cpreg_to_hashtable(cpu, r, opaque, state, | |
9641 | ARM_CP_SECSTATE_S, | |
9642 | crm, opc1, opc2, name); | |
9643 | g_free(name); | |
9644 | add_cpreg_to_hashtable(cpu, r, opaque, state, | |
9645 | ARM_CP_SECSTATE_NS, | |
9646 | crm, opc1, opc2, r->name); | |
9647 | break; | |
9648 | default: | |
9649 | g_assert_not_reached(); | |
9650 | } | |
9651 | } else { | |
9652 | /* | |
9653 | * AArch64 registers get mapped to non-secure instance | |
9654 | * of AArch32 | |
9655 | */ | |
9656 | add_cpreg_to_hashtable(cpu, r, opaque, state, | |
9657 | ARM_CP_SECSTATE_NS, | |
9658 | crm, opc1, opc2, r->name); | |
9659 | } | |
9660 | } | |
9661 | } | |
9662 | } | |
9663 | } | |
9664 | } | |
9665 | ||
9666 | /* Define a whole list of registers */ | |
9667 | void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs, | |
9668 | void *opaque, size_t len) | |
9669 | { | |
9670 | size_t i; | |
9671 | for (i = 0; i < len; ++i) { | |
9672 | define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque); | |
9673 | } | |
9674 | } | |
9675 | ||
9676 | /* | |
9677 | * Modify ARMCPRegInfo for access from userspace. | |
9678 | * | |
9679 | * This is a data driven modification directed by | |
9680 | * ARMCPRegUserSpaceInfo. All registers become ARM_CP_CONST as | |
9681 | * user-space cannot alter any values and dynamic values pertaining to | |
9682 | * execution state are hidden from user space view anyway. | |
9683 | */ | |
9684 | void modify_arm_cp_regs_with_len(ARMCPRegInfo *regs, size_t regs_len, | |
9685 | const ARMCPRegUserSpaceInfo *mods, | |
9686 | size_t mods_len) | |
9687 | { | |
9688 | for (size_t mi = 0; mi < mods_len; ++mi) { | |
9689 | const ARMCPRegUserSpaceInfo *m = mods + mi; | |
9690 | GPatternSpec *pat = NULL; | |
9691 | ||
9692 | if (m->is_glob) { | |
9693 | pat = g_pattern_spec_new(m->name); | |
9694 | } | |
9695 | for (size_t ri = 0; ri < regs_len; ++ri) { | |
9696 | ARMCPRegInfo *r = regs + ri; | |
9697 | ||
9698 | if (pat && g_pattern_match_string(pat, r->name)) { | |
9699 | r->type = ARM_CP_CONST; | |
9700 | r->access = PL0U_R; | |
9701 | r->resetvalue = 0; | |
9702 | /* continue */ | |
9703 | } else if (strcmp(r->name, m->name) == 0) { | |
9704 | r->type = ARM_CP_CONST; | |
9705 | r->access = PL0U_R; | |
9706 | r->resetvalue &= m->exported_bits; | |
9707 | r->resetvalue |= m->fixed_bits; | |
9708 | break; | |
9709 | } | |
9710 | } | |
9711 | if (pat) { | |
9712 | g_pattern_spec_free(pat); | |
9713 | } | |
9714 | } | |
9715 | } | |
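/*
 * Sketch of how a mods table is typically written (the entries below are
 * illustrative, not the real user-mode table): a glob entry such as
 *   { .name = "ID_AA64*_EL1", .is_glob = true }
 * forces every matching register to a RAZ constant readable from EL0,
 * while an exact-match entry such as
 *   { .name = "MIDR_EL1", .exported_bits = 0xff, .fixed_bits = 0x1 }
 * keeps only the exported resetvalue bits and ORs in the fixed bits.
 */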
9716 | ||
9717 | const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp) | |
9718 | { | |
9719 | return g_hash_table_lookup(cpregs, (gpointer)(uintptr_t)encoded_cp); | |
9720 | } | |
9721 | ||
9722 | void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri, | |
9723 | uint64_t value) | |
9724 | { | |
9725 | /* Helper coprocessor write function for write-ignore registers */ | |
9726 | } | |
9727 | ||
9728 | uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri) | |
9729 | { | |
9730 | /* Helper coprocessor read function for read-as-zero registers */ | |
9731 | return 0; | |
9732 | } | |
9733 | ||
9734 | void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) | |
9735 | { | |
9736 | /* Helper coprocessor reset function for do-nothing-on-reset registers */ | |
9737 | } | |
9738 | ||
9739 | static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type) | |
9740 | { | |
9741 | /* | |
9742 | * Return true if it is not valid for us to switch to | |
9743 | * this CPU mode (ie all the UNPREDICTABLE cases in | |
9744 | * the ARM ARM CPSRWriteByInstr pseudocode). | |
9745 | */ | |
9746 | ||
9747 | /* Changes to or from Hyp via MSR and CPS are illegal. */ | |
9748 | if (write_type == CPSRWriteByInstr && | |
9749 | ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP || | |
9750 | mode == ARM_CPU_MODE_HYP)) { | |
9751 | return 1; | |
9752 | } | |
9753 | ||
9754 | switch (mode) { | |
9755 | case ARM_CPU_MODE_USR: | |
9756 | return 0; | |
9757 | case ARM_CPU_MODE_SYS: | |
9758 | case ARM_CPU_MODE_SVC: | |
9759 | case ARM_CPU_MODE_ABT: | |
9760 | case ARM_CPU_MODE_UND: | |
9761 | case ARM_CPU_MODE_IRQ: | |
9762 | case ARM_CPU_MODE_FIQ: | |
9763 | /* | |
9764 | * Note that we don't implement the IMPDEF NSACR.RFR which in v7 | |
9765 | * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.) | |
9766 | */ | |
9767 | /* | |
9768 | * If HCR.TGE is set then changes from Monitor to NS PL1 via MSR | |
9769 | * and CPS are treated as illegal mode changes. | |
9770 | */ | |
9771 | if (write_type == CPSRWriteByInstr && | |
9772 | (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON && | |
9773 | (arm_hcr_el2_eff(env) & HCR_TGE)) { | |
9774 | return 1; | |
9775 | } | |
9776 | return 0; | |
9777 | case ARM_CPU_MODE_HYP: | |
9778 | return !arm_is_el2_enabled(env) || arm_current_el(env) < 2; | |
9779 | case ARM_CPU_MODE_MON: | |
9780 | return arm_current_el(env) < 3; | |
9781 | default: | |
9782 | return 1; | |
9783 | } | |
9784 | } | |
9785 | ||
9786 | uint32_t cpsr_read(CPUARMState *env) | |
9787 | { | |
9788 | int ZF; | |
9789 | ZF = (env->ZF == 0); | |
9790 | return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) | | |
9791 | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27) | |
9792 | | (env->thumb << 5) | ((env->condexec_bits & 3) << 25) | |
9793 | | ((env->condexec_bits & 0xfc) << 8) | |
9794 | | (env->GE << 16) | (env->daif & CPSR_AIF); | |
9795 | } | |
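/*
 * Bit layout assembled by cpsr_read() and consumed by cpsr_write() below:
 * N=31, Z=30, C=29, V=28, Q=27, IT[1:0]=26:25, GE[3:0]=19:16,
 * IT[7:2]=15:10, A/I/F=8:6 (kept in env->daif), T=5; the E bit and the
 * mode field M[4:0] live in env->uncached_cpsr.
 */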
9796 | ||
9797 | void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, | |
9798 | CPSRWriteType write_type) | |
9799 | { | |
9800 | uint32_t changed_daif; | |
9801 | bool rebuild_hflags = (write_type != CPSRWriteRaw) && | |
9802 | (mask & (CPSR_M | CPSR_E | CPSR_IL)); | |
9803 | ||
9804 | if (mask & CPSR_NZCV) { | |
9805 | env->ZF = (~val) & CPSR_Z; | |
9806 | env->NF = val; | |
9807 | env->CF = (val >> 29) & 1; | |
9808 | env->VF = (val << 3) & 0x80000000; | |
9809 | } | |
9810 | if (mask & CPSR_Q) { | |
9811 | env->QF = ((val & CPSR_Q) != 0); | |
9812 | } | |
9813 | if (mask & CPSR_T) { | |
9814 | env->thumb = ((val & CPSR_T) != 0); | |
9815 | } | |
9816 | if (mask & CPSR_IT_0_1) { | |
9817 | env->condexec_bits &= ~3; | |
9818 | env->condexec_bits |= (val >> 25) & 3; | |
9819 | } | |
9820 | if (mask & CPSR_IT_2_7) { | |
9821 | env->condexec_bits &= 3; | |
9822 | env->condexec_bits |= (val >> 8) & 0xfc; | |
9823 | } | |
9824 | if (mask & CPSR_GE) { | |
9825 | env->GE = (val >> 16) & 0xf; | |
9826 | } | |
9827 | ||
9828 | /* | |
9829 | * In a V7 implementation that includes the security extensions but does | |
9830 | * not include Virtualization Extensions the SCR.FW and SCR.AW bits control | |
9831 | * whether non-secure software is allowed to change the CPSR_F and CPSR_A | |
9832 | * bits respectively. | |
9833 | * | |
9834 | * In a V8 implementation, it is permitted for privileged software to | |
9835 | * change the CPSR A/F bits regardless of the SCR.AW/FW bits. | |
9836 | */ | |
9837 | if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) && | |
9838 | arm_feature(env, ARM_FEATURE_EL3) && | |
9839 | !arm_feature(env, ARM_FEATURE_EL2) && | |
9840 | !arm_is_secure(env)) { | |
9841 | ||
9842 | changed_daif = (env->daif ^ val) & mask; | |
9843 | ||
9844 | if (changed_daif & CPSR_A) { | |
9845 | /* | |
9846 | * Check to see if we are allowed to change the masking of async | |
9847 | * abort exceptions from a non-secure state. | |
9848 | */ | |
9849 | if (!(env->cp15.scr_el3 & SCR_AW)) { | |
9850 | qemu_log_mask(LOG_GUEST_ERROR, | |
9851 | "Ignoring attempt to switch CPSR_A flag from " | |
9852 | "non-secure world with SCR.AW bit clear\n"); | |
9853 | mask &= ~CPSR_A; | |
9854 | } | |
9855 | } | |
9856 | ||
9857 | if (changed_daif & CPSR_F) { | |
9858 | /* | |
9859 | * Check to see if we are allowed to change the masking of FIQ | |
9860 | * exceptions from a non-secure state. | |
9861 | */ | |
9862 | if (!(env->cp15.scr_el3 & SCR_FW)) { | |
9863 | qemu_log_mask(LOG_GUEST_ERROR, | |
9864 | "Ignoring attempt to switch CPSR_F flag from " | |
9865 | "non-secure world with SCR.FW bit clear\n"); | |
9866 | mask &= ~CPSR_F; | |
9867 | } | |
9868 | ||
9869 | /* | |
9870 | * Check whether non-maskable FIQ (NMFI) support is enabled. | |
9871 | * If this bit is set software is not allowed to mask | |
9872 | * FIQs, but is allowed to set CPSR_F to 0. | |
9873 | */ | |
9874 | if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) && | |
9875 | (val & CPSR_F)) { | |
9876 | qemu_log_mask(LOG_GUEST_ERROR, | |
9877 | "Ignoring attempt to enable CPSR_F flag " | |
9878 | "(non-maskable FIQ [NMFI] support enabled)\n"); | |
9879 | mask &= ~CPSR_F; | |
9880 | } | |
9881 | } | |
9882 | } | |
9883 | ||
9884 | env->daif &= ~(CPSR_AIF & mask); | |
9885 | env->daif |= val & CPSR_AIF & mask; | |
9886 | ||
9887 | if (write_type != CPSRWriteRaw && | |
9888 | ((env->uncached_cpsr ^ val) & mask & CPSR_M)) { | |
9889 | if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) { | |
9890 | /* | |
9891 | * Note that we can only get here in USR mode if this is a | |
9892 | * gdb stub write; for this case we follow the architectural | |
9893 | * behaviour for guest writes in USR mode of ignoring an attempt | |
9894 | * to switch mode. (Those are caught by translate.c for writes | |
9895 | * triggered by guest instructions.) | |
9896 | */ | |
9897 | mask &= ~CPSR_M; | |
9898 | } else if (bad_mode_switch(env, val & CPSR_M, write_type)) { | |
9899 | /* | |
9900 | * Attempt to switch to an invalid mode: this is UNPREDICTABLE in | |
9901 | * v7, and has defined behaviour in v8: | |
9902 | * + leave CPSR.M untouched | |
9903 | * + allow changes to the other CPSR fields | |
9904 | * + set PSTATE.IL | |
9905 | * For user changes via the GDB stub, we don't set PSTATE.IL, | |
9906 | * as this would be unnecessarily harsh for a user error. | |
9907 | */ | |
9908 | mask &= ~CPSR_M; | |
9909 | if (write_type != CPSRWriteByGDBStub && | |
9910 | arm_feature(env, ARM_FEATURE_V8)) { | |
9911 | mask |= CPSR_IL; | |
9912 | val |= CPSR_IL; | |
9913 | } | |
9914 | qemu_log_mask(LOG_GUEST_ERROR, | |
9915 | "Illegal AArch32 mode switch attempt from %s to %s\n", | |
9916 | aarch32_mode_name(env->uncached_cpsr), | |
9917 | aarch32_mode_name(val)); | |
9918 | } else { | |
9919 | qemu_log_mask(CPU_LOG_INT, "%s %s to %s PC 0x%" PRIx32 "\n", | |
9920 | write_type == CPSRWriteExceptionReturn ? | |
9921 | "Exception return from AArch32" : | |
9922 | "AArch32 mode switch from", | |
9923 | aarch32_mode_name(env->uncached_cpsr), | |
9924 | aarch32_mode_name(val), env->regs[15]); | |
9925 | switch_mode(env, val & CPSR_M); | |
9926 | } | |
9927 | } | |
9928 | mask &= ~CACHED_CPSR_BITS; | |
9929 | env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask); | |
9930 | if (tcg_enabled() && rebuild_hflags) { | |
9931 | arm_rebuild_hflags(env); | |
9932 | } | |
9933 | } | |
9934 | ||
9935 | /* Sign/zero extend */ | |
9936 | uint32_t HELPER(sxtb16)(uint32_t x) | |
9937 | { | |
9938 | uint32_t res; | |
9939 | res = (uint16_t)(int8_t)x; | |
9940 | res |= (uint32_t)(int8_t)(x >> 16) << 16; | |
9941 | return res; | |
9942 | } | |
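/*
 * Worked example: HELPER(sxtb16)(0x007f0080) == 0x007fff80, since byte 0
 * (0x80) sign-extends to 0xff80 in the low halfword and byte 2 (0x7f)
 * extends to 0x007f in the high halfword; bytes 1 and 3 are ignored.
 */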
9943 | ||
9944 | static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra) | |
9945 | { | |
9946 | /* | |
9947 | * Take a division-by-zero exception if necessary; otherwise return | |
9948 | * to get the usual non-trapping division behaviour (result of 0) | |
9949 | */ | |
9950 | if (arm_feature(env, ARM_FEATURE_M) | |
9951 | && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) { | |
9952 | raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra); | |
9953 | } | |
9954 | } | |
9955 | ||
9956 | uint32_t HELPER(uxtb16)(uint32_t x) | |
9957 | { | |
9958 | uint32_t res; | |
9959 | res = (uint16_t)(uint8_t)x; | |
9960 | res |= (uint32_t)(uint8_t)(x >> 16) << 16; | |
9961 | return res; | |
9962 | } | |
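/*
 * Worked example: HELPER(uxtb16)(0x12345678) == 0x00340078; bytes 0 and 2
 * are zero-extended into the two halfwords, bytes 1 and 3 are discarded.
 */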
9963 | ||
9964 | int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den) | |
9965 | { | |
9966 | if (den == 0) { | |
9967 | handle_possible_div0_trap(env, GETPC()); | |
9968 | return 0; | |
9969 | } | |
9970 | if (num == INT_MIN && den == -1) { | |
9971 | return INT_MIN; | |
9972 | } | |
9973 | return num / den; | |
9974 | } | |
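/*
 * The two early returns above mirror the Arm pseudocode for SDIV: dividing
 * by zero yields 0 (unless the M-profile CCR.DIV_0_TRP trap fires), and
 * the only overflowing case, INT_MIN / -1, wraps back to INT_MIN rather
 * than raising an exception.
 */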
9975 | ||
9976 | uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den) | |
9977 | { | |
9978 | if (den == 0) { | |
9979 | handle_possible_div0_trap(env, GETPC()); | |
9980 | return 0; | |
9981 | } | |
9982 | return num / den; | |
9983 | } | |
9984 | ||
9985 | uint32_t HELPER(rbit)(uint32_t x) | |
9986 | { | |
9987 | return revbit32(x); | |
9988 | } | |
9989 | ||
9990 | #ifdef CONFIG_USER_ONLY | |
9991 | ||
9992 | static void switch_mode(CPUARMState *env, int mode) | |
9993 | { | |
9994 | ARMCPU *cpu = env_archcpu(env); | |
9995 | ||
9996 | if (mode != ARM_CPU_MODE_USR) { | |
9997 | cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); | |
9998 | } | |
9999 | } | |
10000 | ||
10001 | uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, | |
10002 | uint32_t cur_el, bool secure) | |
10003 | { | |
10004 | return 1; | |
10005 | } | |
10006 | ||
10007 | void aarch64_sync_64_to_32(CPUARMState *env) | |
10008 | { | |
10009 | g_assert_not_reached(); | |
10010 | } | |
10011 | ||
10012 | #else | |
10013 | ||
10014 | static void switch_mode(CPUARMState *env, int mode) | |
10015 | { | |
10016 | int old_mode; | |
10017 | int i; | |
10018 | ||
10019 | old_mode = env->uncached_cpsr & CPSR_M; | |
10020 | if (mode == old_mode) { | |
10021 | return; | |
10022 | } | |
10023 | ||
10024 | if (old_mode == ARM_CPU_MODE_FIQ) { | |
10025 | memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t)); | |
10026 | memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t)); | |
10027 | } else if (mode == ARM_CPU_MODE_FIQ) { | |
10028 | memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t)); | |
10029 | memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t)); | |
10030 | } | |
10031 | ||
10032 | i = bank_number(old_mode); | |
10033 | env->banked_r13[i] = env->regs[13]; | |
10034 | env->banked_spsr[i] = env->spsr; | |
10035 | ||
10036 | i = bank_number(mode); | |
10037 | env->regs[13] = env->banked_r13[i]; | |
10038 | env->spsr = env->banked_spsr[i]; | |
10039 | ||
10040 | env->banked_r14[r14_bank_number(old_mode)] = env->regs[14]; | |
10041 | env->regs[14] = env->banked_r14[r14_bank_number(mode)]; | |
10042 | } | |
10043 | ||
10044 | /* | |
10045 | * Physical Interrupt Target EL Lookup Table | |
10046 | * | |
10047 | * [ From ARM ARM section G1.13.4 (Table G1-15) ] | |
10048 | * | |
10049 | * The below multi-dimensional table is used for looking up the target | |
10050 | * exception level given numerous condition criteria. Specifically, the | |
10051 | * target EL is based on SCR and HCR routing controls as well as the | |
10052 | * currently executing EL and secure state. | |
10053 | * | |
10054 | * Dimensions: | |
10055 | * target_el_table[2][2][2][2][2][4] | |
10056 | * | | | | | +--- Current EL | |
10057 | * | | | | +------ Non-secure(0)/Secure(1) | |
10058 | * | | | +--------- HCR mask override | |
10059 | * | | +------------ SCR exec state control | |
10060 | * | +--------------- SCR mask override | |
10061 | * +------------------ 32-bit(0)/64-bit(1) EL3 | |
10062 | * | |
10063 | * The table values are as such: | |
10064 | * 0-3 = EL0-EL3 | |
10065 | * -1 = Cannot occur | |
10066 | * | |
10067 | * The ARM ARM target EL table includes entries indicating that an "exception | |
10068 | * is not taken". The two cases where this is applicable are: | |
10069 | * 1) An exception is taken from EL3 but the SCR does not have the exception | |
10070 | * routed to EL3. | |
10071 | * 2) An exception is taken from EL2 but the HCR does not have the exception | |
10072 | * routed to EL2. | |
10073 | * In these two cases, the table below contains a target of EL1. This value is | |
10074 | * returned as it is expected that the consumer of the table data will check | |
10075 | * for "target EL >= current EL" to ensure the exception is not taken. | |
10076 | * | |
10077 | * SCR HCR | |
10078 | * 64 EA AMO From | |
10079 | * BIT IRQ IMO Non-secure Secure | |
10080 | * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3 | |
10081 | */ | |
10082 | static const int8_t target_el_table[2][2][2][2][2][4] = { | |
10083 | {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, | |
10084 | {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},}, | |
10085 | {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },}, | |
10086 | {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},}, | |
10087 | {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, | |
10088 | {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},}, | |
10089 | {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },}, | |
10090 | {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},}, | |
10091 | {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },}, | |
10092 | {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 2, 2, -1, 1 },},}, | |
10093 | {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, 1, 1 },}, | |
10094 | {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 2, 2, 2, 1 },},},}, | |
10095 | {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },}, | |
10096 | {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},}, | |
10097 | {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },}, | |
10098 | {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, 3, 3 },},},},}, | |
10099 | }; | |
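/*
 * Worked example of a lookup in the table above: with an AArch64 EL3
 * (is64 = 1), SCR_EL3.IRQ = 0, SCR_EL3.RW = 1 and HCR_EL2.IMO = 1, a
 * physical IRQ taken from non-secure EL0 indexes
 * target_el_table[1][0][1][1][0][0] (the "1 0 1 1" row) and is routed
 * to EL2, as expected when IMO routes IRQs to EL2.
 */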
10100 | ||
10101 | /* | |
10102 | * Determine the target EL for physical exceptions | |
10103 | */ | |
10104 | uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, | |
10105 | uint32_t cur_el, bool secure) | |
10106 | { | |
10107 | CPUARMState *env = cs->env_ptr; | |
10108 | bool rw; | |
10109 | bool scr; | |
10110 | bool hcr; | |
10111 | int target_el; | |
10112 | /* Is the highest EL AArch64? */ | |
10113 | bool is64 = arm_feature(env, ARM_FEATURE_AARCH64); | |
10114 | uint64_t hcr_el2; | |
10115 | ||
10116 | if (arm_feature(env, ARM_FEATURE_EL3)) { | |
10117 | rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW); | |
10118 | } else { | |
10119 | /* | |
10120 | * Either EL2 is the highest EL (and so the EL2 register width | |
10121 | * is given by is64); or there is no EL2 or EL3, in which case | |
10122 | * the value of 'rw' does not affect the table lookup anyway. | |
10123 | */ | |
10124 | rw = is64; | |
10125 | } | |
10126 | ||
10127 | hcr_el2 = arm_hcr_el2_eff(env); | |
10128 | switch (excp_idx) { | |
10129 | case EXCP_IRQ: | |
10130 | scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ); | |
10131 | hcr = hcr_el2 & HCR_IMO; | |
10132 | break; | |
10133 | case EXCP_FIQ: | |
10134 | scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ); | |
10135 | hcr = hcr_el2 & HCR_FMO; | |
10136 | break; | |
10137 | default: | |
10138 | scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA); | |
10139 | hcr = hcr_el2 & HCR_AMO; | |
10140 | break; | |
10141 | }; | |
10142 | ||
10143 | /* | |
10144 | * For these purposes, TGE and AMO/IMO/FMO both force the | |
10145 | * interrupt to EL2. Fold TGE into the bit extracted above. | |
10146 | */ | |
10147 | hcr |= (hcr_el2 & HCR_TGE) != 0; | |
10148 | ||
10149 | /* Perform a table-lookup for the target EL given the current state */ | |
10150 | target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el]; | |
10151 | ||
10152 | assert(target_el > 0); | |
10153 | ||
10154 | return target_el; | |
10155 | } | |
10156 | ||
10157 | void arm_log_exception(CPUState *cs) | |
10158 | { | |
10159 | int idx = cs->exception_index; | |
10160 | ||
10161 | if (qemu_loglevel_mask(CPU_LOG_INT)) { | |
10162 | const char *exc = NULL; | |
10163 | static const char * const excnames[] = { | |
10164 | [EXCP_UDEF] = "Undefined Instruction", | |
10165 | [EXCP_SWI] = "SVC", | |
10166 | [EXCP_PREFETCH_ABORT] = "Prefetch Abort", | |
10167 | [EXCP_DATA_ABORT] = "Data Abort", | |
10168 | [EXCP_IRQ] = "IRQ", | |
10169 | [EXCP_FIQ] = "FIQ", | |
10170 | [EXCP_BKPT] = "Breakpoint", | |
10171 | [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit", | |
10172 | [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage", | |
10173 | [EXCP_HVC] = "Hypervisor Call", | |
10174 | [EXCP_HYP_TRAP] = "Hypervisor Trap", | |
10175 | [EXCP_SMC] = "Secure Monitor Call", | |
10176 | [EXCP_VIRQ] = "Virtual IRQ", | |
10177 | [EXCP_VFIQ] = "Virtual FIQ", | |
10178 | [EXCP_SEMIHOST] = "Semihosting call", | |
10179 | [EXCP_NOCP] = "v7M NOCP UsageFault", | |
10180 | [EXCP_INVSTATE] = "v7M INVSTATE UsageFault", | |
10181 | [EXCP_STKOF] = "v8M STKOF UsageFault", | |
10182 | [EXCP_LAZYFP] = "v7M exception during lazy FP stacking", | |
10183 | [EXCP_LSERR] = "v8M LSERR UsageFault", | |
10184 | [EXCP_UNALIGNED] = "v7M UNALIGNED UsageFault", | |
10185 | [EXCP_DIVBYZERO] = "v7M DIVBYZERO UsageFault", | |
10186 | [EXCP_VSERR] = "Virtual SERR", | |
10187 | [EXCP_GPC] = "Granule Protection Check", | |
10188 | }; | |
10189 | ||
10190 | if (idx >= 0 && idx < ARRAY_SIZE(excnames)) { | |
10191 | exc = excnames[idx]; | |
10192 | } | |
10193 | if (!exc) { | |
10194 | exc = "unknown"; | |
10195 | } | |
10196 | qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s] on CPU %d\n", | |
10197 | idx, exc, cs->cpu_index); | |
10198 | } | |
10199 | } | |
10200 | ||
10201 | /* | |
10202 | * Function used to synchronize QEMU's AArch64 register set with AArch32 | |
10203 | * register set. This is necessary when switching between AArch32 and AArch64 | |
10204 | * execution state. | |
10205 | */ | |
10206 | void aarch64_sync_32_to_64(CPUARMState *env) | |
10207 | { | |
10208 | int i; | |
10209 | uint32_t mode = env->uncached_cpsr & CPSR_M; | |
10210 | ||
10211 | /* We can blanket copy R[0:7] to X[0:7] */ | |
10212 | for (i = 0; i < 8; i++) { | |
10213 | env->xregs[i] = env->regs[i]; | |
10214 | } | |
10215 | ||
10216 | /* | |
10217 | * Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12. | |
10218 | * Otherwise, they come from the banked user regs. | |
10219 | */ | |
10220 | if (mode == ARM_CPU_MODE_FIQ) { | |
10221 | for (i = 8; i < 13; i++) { | |
10222 | env->xregs[i] = env->usr_regs[i - 8]; | |
10223 | } | |
10224 | } else { | |
10225 | for (i = 8; i < 13; i++) { | |
10226 | env->xregs[i] = env->regs[i]; | |
10227 | } | |
10228 | } | |
10229 | ||
10230 | /* | |
10231 | * Registers x13-x23 are the various mode SP and FP registers. Registers | |
10232 | * r13 and r14 are only copied if we are in that mode, otherwise we copy | |
10233 | * from the mode banked register. | |
10234 | */ | |
10235 | if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { | |
10236 | env->xregs[13] = env->regs[13]; | |
10237 | env->xregs[14] = env->regs[14]; | |
10238 | } else { | |
10239 | env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)]; | |
10240 | /* HYP is an exception in that it is copied from r14 */ | |
10241 | if (mode == ARM_CPU_MODE_HYP) { | |
10242 | env->xregs[14] = env->regs[14]; | |
10243 | } else { | |
10244 | env->xregs[14] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)]; | |
10245 | } | |
10246 | } | |
10247 | ||
10248 | if (mode == ARM_CPU_MODE_HYP) { | |
10249 | env->xregs[15] = env->regs[13]; | |
10250 | } else { | |
10251 | env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)]; | |
10252 | } | |
10253 | ||
10254 | if (mode == ARM_CPU_MODE_IRQ) { | |
10255 | env->xregs[16] = env->regs[14]; | |
10256 | env->xregs[17] = env->regs[13]; | |
10257 | } else { | |
10258 | env->xregs[16] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)]; | |
10259 | env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)]; | |
10260 | } | |
10261 | ||
10262 | if (mode == ARM_CPU_MODE_SVC) { | |
10263 | env->xregs[18] = env->regs[14]; | |
10264 | env->xregs[19] = env->regs[13]; | |
10265 | } else { | |
10266 | env->xregs[18] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)]; | |
10267 | env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)]; | |
10268 | } | |
10269 | ||
10270 | if (mode == ARM_CPU_MODE_ABT) { | |
10271 | env->xregs[20] = env->regs[14]; | |
10272 | env->xregs[21] = env->regs[13]; | |
10273 | } else { | |
10274 | env->xregs[20] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)]; | |
10275 | env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)]; | |
10276 | } | |
10277 | ||
10278 | if (mode == ARM_CPU_MODE_UND) { | |
10279 | env->xregs[22] = env->regs[14]; | |
10280 | env->xregs[23] = env->regs[13]; | |
10281 | } else { | |
10282 | env->xregs[22] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)]; | |
10283 | env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)]; | |
10284 | } | |
10285 | ||
10286 | /* | |
10287 | * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ | |
10288 | * mode, then we can copy from r8-r14. Otherwise, we copy from the | |
10289 | * FIQ bank for r8-r14. | |
10290 | */ | |
10291 | if (mode == ARM_CPU_MODE_FIQ) { | |
10292 | for (i = 24; i < 31; i++) { | |
10293 | env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */ | |
10294 | } | |
10295 | } else { | |
10296 | for (i = 24; i < 29; i++) { | |
10297 | env->xregs[i] = env->fiq_regs[i - 24]; | |
10298 | } | |
10299 | env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)]; | |
10300 | env->xregs[30] = env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)]; | |
10301 | } | |
10302 | ||
10303 | env->pc = env->regs[15]; | |
10304 | } | |
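/*
 * Fixed mapping used by aarch64_sync_32_to_64() above and
 * aarch64_sync_64_to_32() below:
 *   X0-X7   <-> R0-R7            X16/X17 <-> LR_irq/SP_irq
 *   X8-X12  <-> usr R8-R12       X18/X19 <-> LR_svc/SP_svc
 *   X13/X14 <-> SP_usr/LR_usr    X20/X21 <-> LR_abt/SP_abt
 *   X15     <-> SP_hyp           X22/X23 <-> LR_und/SP_und
 *   X24-X28 <-> fiq R8-R12       X29/X30 <-> SP_fiq/LR_fiq
 * with the AArch32 PC corresponding to the AArch64 PC.
 */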
10305 | ||
10306 | /* | |
10307 | * Function used to synchronize QEMU's AArch32 register set with AArch64 | |
10308 | * register set. This is necessary when switching between AArch32 and AArch64 | |
10309 | * execution state. | |
10310 | */ | |
10311 | void aarch64_sync_64_to_32(CPUARMState *env) | |
10312 | { | |
10313 | int i; | |
10314 | uint32_t mode = env->uncached_cpsr & CPSR_M; | |
10315 | ||
10316 | /* We can blanket copy X[0:7] to R[0:7] */ | |
10317 | for (i = 0; i < 8; i++) { | |
10318 | env->regs[i] = env->xregs[i]; | |
10319 | } | |
10320 | ||
10321 | /* | |
10322 | * Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12. | |
10323 | * Otherwise, we copy x8-x12 into the banked user regs. | |
10324 | */ | |
10325 | if (mode == ARM_CPU_MODE_FIQ) { | |
10326 | for (i = 8; i < 13; i++) { | |
10327 | env->usr_regs[i - 8] = env->xregs[i]; | |
10328 | } | |
10329 | } else { | |
10330 | for (i = 8; i < 13; i++) { | |
10331 | env->regs[i] = env->xregs[i]; | |
10332 | } | |
10333 | } | |
10334 | ||
10335 | /* | |
10336 | * Registers r13 & r14 depend on the current mode. | |
10337 | * If we are in a given mode, we copy the corresponding x registers to r13 | |
10338 | * and r14. Otherwise, we copy the x register to the banked r13 and r14 | |
10339 | * for the mode. | |
10340 | */ | |
10341 | if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) { | |
10342 | env->regs[13] = env->xregs[13]; | |
10343 | env->regs[14] = env->xregs[14]; | |
10344 | } else { | |
10345 | env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13]; | |
10346 | ||
10347 | /* | |
10348 | * HYP is an exception in that it does not have its own banked r14 but | |
10349 | * shares the USR r14 | |
10350 | */ | |
10351 | if (mode == ARM_CPU_MODE_HYP) { | |
10352 | env->regs[14] = env->xregs[14]; | |
10353 | } else { | |
10354 | env->banked_r14[r14_bank_number(ARM_CPU_MODE_USR)] = env->xregs[14]; | |
10355 | } | |
10356 | } | |
10357 | ||
10358 | if (mode == ARM_CPU_MODE_HYP) { | |
10359 | env->regs[13] = env->xregs[15]; | |
10360 | } else { | |
10361 | env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15]; | |
10362 | } | |
10363 | ||
10364 | if (mode == ARM_CPU_MODE_IRQ) { | |
10365 | env->regs[14] = env->xregs[16]; | |
10366 | env->regs[13] = env->xregs[17]; | |
10367 | } else { | |
10368 | env->banked_r14[r14_bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16]; | |
10369 | env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17]; | |
10370 | } | |
10371 | ||
10372 | if (mode == ARM_CPU_MODE_SVC) { | |
10373 | env->regs[14] = env->xregs[18]; | |
10374 | env->regs[13] = env->xregs[19]; | |
10375 | } else { | |
10376 | env->banked_r14[r14_bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18]; | |
10377 | env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19]; | |
10378 | } | |
10379 | ||
10380 | if (mode == ARM_CPU_MODE_ABT) { | |
10381 | env->regs[14] = env->xregs[20]; | |
10382 | env->regs[13] = env->xregs[21]; | |
10383 | } else { | |
10384 | env->banked_r14[r14_bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20]; | |
10385 | env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21]; | |
10386 | } | |
10387 | ||
10388 | if (mode == ARM_CPU_MODE_UND) { | |
10389 | env->regs[14] = env->xregs[22]; | |
10390 | env->regs[13] = env->xregs[23]; | |
10391 | } else { | |
10392 | env->banked_r14[r14_bank_number(ARM_CPU_MODE_UND)] = env->xregs[22]; | |
10393 | env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23]; | |
10394 | } | |
10395 | ||
10396 | /* | |
10397 | * Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ | |
10398 | * mode, then we can copy to r8-r14. Otherwise, we copy to the | |
10399 | * FIQ bank for r8-r14. | |
10400 | */ | |
10401 | if (mode == ARM_CPU_MODE_FIQ) { | |
10402 | for (i = 24; i < 31; i++) { | |
10403 | env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */ | |
10404 | } | |
10405 | } else { | |
10406 | for (i = 24; i < 29; i++) { | |
10407 | env->fiq_regs[i - 24] = env->xregs[i]; | |
10408 | } | |
10409 | env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29]; | |
10410 | env->banked_r14[r14_bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30]; | |
10411 | } | |
10412 | ||
10413 | env->regs[15] = env->pc; | |
10414 | } | |
10415 | ||
10416 | static void take_aarch32_exception(CPUARMState *env, int new_mode, | |
10417 | uint32_t mask, uint32_t offset, | |
10418 | uint32_t newpc) | |
10419 | { | |
10420 | int new_el; | |
10421 | ||
10422 | /* Change the CPU state so as to actually take the exception. */ | |
10423 | switch_mode(env, new_mode); | |
10424 | ||
10425 | /* | |
10426 | * For exceptions taken to AArch32 we must clear the SS bit in both | |
10427 | * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now. | |
10428 | */ | |
10429 | env->pstate &= ~PSTATE_SS; | |
10430 | env->spsr = cpsr_read(env); | |
10431 | /* Clear IT bits. */ | |
10432 | env->condexec_bits = 0; | |
10433 | /* Switch to the new mode, and to the correct instruction set. */ | |
10434 | env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode; | |
10435 | ||
10436 | /* This must be after mode switching. */ | |
10437 | new_el = arm_current_el(env); | |
10438 | ||
10439 | /* Set new mode endianness */ | |
10440 | env->uncached_cpsr &= ~CPSR_E; | |
10441 | if (env->cp15.sctlr_el[new_el] & SCTLR_EE) { | |
10442 | env->uncached_cpsr |= CPSR_E; | |
10443 | } | |
10444 | /* J and IL must always be cleared for exception entry */ | |
10445 | env->uncached_cpsr &= ~(CPSR_IL | CPSR_J); | |
10446 | env->daif |= mask; | |
10447 | ||
10448 | if (cpu_isar_feature(aa32_ssbs, env_archcpu(env))) { | |
10449 | if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_32) { | |
10450 | env->uncached_cpsr |= CPSR_SSBS; | |
10451 | } else { | |
10452 | env->uncached_cpsr &= ~CPSR_SSBS; | |
10453 | } | |
10454 | } | |
10455 | ||
10456 | if (new_mode == ARM_CPU_MODE_HYP) { | |
10457 | env->thumb = (env->cp15.sctlr_el[2] & SCTLR_TE) != 0; | |
10458 | env->elr_el[2] = env->regs[15]; | |
10459 | } else { | |
10460 | /* CPSR.PAN is normally preserved unless... */ | |
10461 | if (cpu_isar_feature(aa32_pan, env_archcpu(env))) { | |
10462 | switch (new_el) { | |
10463 | case 3: | |
10464 | if (!arm_is_secure_below_el3(env)) { | |
10465 | /* ... the target is EL3, from non-secure state. */ | |
10466 | env->uncached_cpsr &= ~CPSR_PAN; | |
10467 | break; | |
10468 | } | |
10469 | /* ... the target is EL3, from secure state ... */ | |
10470 | /* fall through */ | |
10471 | case 1: | |
10472 | /* ... the target is EL1 and SCTLR.SPAN is 0. */ | |
10473 | if (!(env->cp15.sctlr_el[new_el] & SCTLR_SPAN)) { | |
10474 | env->uncached_cpsr |= CPSR_PAN; | |
10475 | } | |
10476 | break; | |
10477 | } | |
10478 | } | |
10479 | /* | |
10480 | * This is a lie, as there was no c1_sys on V4T/V5, but who cares: | |
10481 | * we should just guard the Thumb mode on V4. | |
10482 | */ | |
10483 | if (arm_feature(env, ARM_FEATURE_V4T)) { | |
10484 | env->thumb = | |
10485 | (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0; | |
10486 | } | |
10487 | env->regs[14] = env->regs[15] + offset; | |
10488 | } | |
10489 | env->regs[15] = newpc; | |
10490 | ||
10491 | if (tcg_enabled()) { | |
10492 | arm_rebuild_hflags(env); | |
10493 | } | |
10494 | } | |
10495 | ||
10496 | static void arm_cpu_do_interrupt_aarch32_hyp(CPUState *cs) | |
10497 | { | |
10498 | /* | |
10499 | * Handle exception entry to Hyp mode; this is sufficiently | |
10500 | * different to entry to other AArch32 modes that we handle it | |
10501 | * separately here. | |
10502 | * | |
10503 | * The vector table entry used is always the 0x14 Hyp mode entry point, | |
10504 | * unless this is an UNDEF/SVC/HVC/abort taken from Hyp to Hyp. | |
10505 | * The offset applied to the preferred return address is always zero | |
10506 | * (see DDI0487C.a section G1.12.3). | |
10507 | * PSTATE A/I/F masks are set based only on the SCR.EA/IRQ/FIQ values. | |
10508 | */ | |
10509 | uint32_t addr, mask; | |
10510 | ARMCPU *cpu = ARM_CPU(cs); | |
10511 | CPUARMState *env = &cpu->env; | |
10512 | ||
10513 | switch (cs->exception_index) { | |
10514 | case EXCP_UDEF: | |
10515 | addr = 0x04; | |
10516 | break; | |
10517 | case EXCP_SWI: | |
10518 | addr = 0x08; | |
10519 | break; | |
10520 | case EXCP_BKPT: | |
10521 | /* Fall through to prefetch abort. */ | |
10522 | case EXCP_PREFETCH_ABORT: | |
10523 | env->cp15.ifar_s = env->exception.vaddress; | |
10524 | qemu_log_mask(CPU_LOG_INT, "...with HIFAR 0x%x\n", | |
10525 | (uint32_t)env->exception.vaddress); | |
10526 | addr = 0x0c; | |
10527 | break; | |
10528 | case EXCP_DATA_ABORT: | |
10529 | env->cp15.dfar_s = env->exception.vaddress; | |
10530 | qemu_log_mask(CPU_LOG_INT, "...with HDFAR 0x%x\n", | |
10531 | (uint32_t)env->exception.vaddress); | |
10532 | addr = 0x10; | |
10533 | break; | |
10534 | case EXCP_IRQ: | |
10535 | addr = 0x18; | |
10536 | break; | |
10537 | case EXCP_FIQ: | |
10538 | addr = 0x1c; | |
10539 | break; | |
10540 | case EXCP_HVC: | |
10541 | addr = 0x08; | |
10542 | break; | |
10543 | case EXCP_HYP_TRAP: | |
10544 | addr = 0x14; | |
10545 | break; | |
10546 | default: | |
10547 | cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); | |
10548 | } | |
10549 | ||
10550 | if (cs->exception_index != EXCP_IRQ && cs->exception_index != EXCP_FIQ) { | |
10551 | if (!arm_feature(env, ARM_FEATURE_V8)) { | |
10552 | /* | |
10553 | * QEMU syndrome values are v8-style. v7 has the IL bit | |
10554 | * UNK/SBZP for "field not valid" cases, where v8 uses RES1. | |
10555 | * If this is a v7 CPU, squash the IL bit in those cases. | |
10556 | */ | |
10557 | if (cs->exception_index == EXCP_PREFETCH_ABORT || | |
10558 | (cs->exception_index == EXCP_DATA_ABORT && | |
10559 | !(env->exception.syndrome & ARM_EL_ISV)) || | |
10560 | syn_get_ec(env->exception.syndrome) == EC_UNCATEGORIZED) { | |
10561 | env->exception.syndrome &= ~ARM_EL_IL; | |
10562 | } | |
10563 | } | |
10564 | env->cp15.esr_el[2] = env->exception.syndrome; | |
10565 | } | |
10566 | ||
10567 | if (arm_current_el(env) != 2 && addr < 0x14) { | |
10568 | addr = 0x14; | |
10569 | } | |
10570 | ||
10571 | mask = 0; | |
10572 | if (!(env->cp15.scr_el3 & SCR_EA)) { | |
10573 | mask |= CPSR_A; | |
10574 | } | |
10575 | if (!(env->cp15.scr_el3 & SCR_IRQ)) { | |
10576 | mask |= CPSR_I; | |
10577 | } | |
10578 | if (!(env->cp15.scr_el3 & SCR_FIQ)) { | |
10579 | mask |= CPSR_F; | |
10580 | } | |
10581 | ||
10582 | addr += env->cp15.hvbar; | |
10583 | ||
10584 | take_aarch32_exception(env, ARM_CPU_MODE_HYP, mask, 0, addr); | |
10585 | } | |
10586 | ||
10587 | static void arm_cpu_do_interrupt_aarch32(CPUState *cs) | |
10588 | { | |
10589 | ARMCPU *cpu = ARM_CPU(cs); | |
10590 | CPUARMState *env = &cpu->env; | |
10591 | uint32_t addr; | |
10592 | uint32_t mask; | |
10593 | int new_mode; | |
10594 | uint32_t offset; | |
10595 | uint32_t moe; | |
10596 | ||
10597 | /* If this is a debug exception we must update the DBGDSCR.MOE bits */ | |
10598 | switch (syn_get_ec(env->exception.syndrome)) { | |
10599 | case EC_BREAKPOINT: | |
10600 | case EC_BREAKPOINT_SAME_EL: | |
10601 | moe = 1; | |
10602 | break; | |
10603 | case EC_WATCHPOINT: | |
10604 | case EC_WATCHPOINT_SAME_EL: | |
10605 | moe = 10; | |
10606 | break; | |
10607 | case EC_AA32_BKPT: | |
10608 | moe = 3; | |
10609 | break; | |
10610 | case EC_VECTORCATCH: | |
10611 | moe = 5; | |
10612 | break; | |
10613 | default: | |
10614 | moe = 0; | |
10615 | break; | |
10616 | } | |
10617 | ||
10618 | if (moe) { | |
10619 | env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe); | |
10620 | } | |
10621 | ||
10622 | if (env->exception.target_el == 2) { | |
10623 | arm_cpu_do_interrupt_aarch32_hyp(cs); | |
10624 | return; | |
10625 | } | |
10626 | ||
10627 | switch (cs->exception_index) { | |
10628 | case EXCP_UDEF: | |
10629 | new_mode = ARM_CPU_MODE_UND; | |
10630 | addr = 0x04; | |
10631 | mask = CPSR_I; | |
10632 | if (env->thumb) { | |
10633 | offset = 2; | |
10634 | } else { | |
10635 | offset = 4; | |
10636 | } | |
10637 | break; | |
10638 | case EXCP_SWI: | |
10639 | new_mode = ARM_CPU_MODE_SVC; | |
10640 | addr = 0x08; | |
10641 | mask = CPSR_I; | |
10642 | /* The PC already points to the next instruction. */ | |
10643 | offset = 0; | |
10644 | break; | |
10645 | case EXCP_BKPT: | |
10646 | /* Fall through to prefetch abort. */ | |
10647 | case EXCP_PREFETCH_ABORT: | |
10648 | A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr); | |
10649 | A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress); | |
10650 | qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n", | |
10651 | env->exception.fsr, (uint32_t)env->exception.vaddress); | |
10652 | new_mode = ARM_CPU_MODE_ABT; | |
10653 | addr = 0x0c; | |
10654 | mask = CPSR_A | CPSR_I; | |
10655 | offset = 4; | |
10656 | break; | |
10657 | case EXCP_DATA_ABORT: | |
10658 | A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); | |
10659 | A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress); | |
10660 | qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n", | |
10661 | env->exception.fsr, | |
10662 | (uint32_t)env->exception.vaddress); | |
10663 | new_mode = ARM_CPU_MODE_ABT; | |
10664 | addr = 0x10; | |
10665 | mask = CPSR_A | CPSR_I; | |
10666 | offset = 8; | |
10667 | break; | |
10668 | case EXCP_IRQ: | |
10669 | new_mode = ARM_CPU_MODE_IRQ; | |
10670 | addr = 0x18; | |
10671 | /* Disable IRQ and imprecise data aborts. */ | |
10672 | mask = CPSR_A | CPSR_I; | |
10673 | offset = 4; | |
10674 | if (env->cp15.scr_el3 & SCR_IRQ) { | |
10675 | /* IRQ routed to monitor mode */ | |
10676 | new_mode = ARM_CPU_MODE_MON; | |
10677 | mask |= CPSR_F; | |
10678 | } | |
10679 | break; | |
10680 | case EXCP_FIQ: | |
10681 | new_mode = ARM_CPU_MODE_FIQ; | |
10682 | addr = 0x1c; | |
10683 | /* Disable FIQ, IRQ and imprecise data aborts. */ | |
10684 | mask = CPSR_A | CPSR_I | CPSR_F; | |
10685 | if (env->cp15.scr_el3 & SCR_FIQ) { | |
10686 | /* FIQ routed to monitor mode */ | |
10687 | new_mode = ARM_CPU_MODE_MON; | |
10688 | } | |
10689 | offset = 4; | |
10690 | break; | |
10691 | case EXCP_VIRQ: | |
10692 | new_mode = ARM_CPU_MODE_IRQ; | |
10693 | addr = 0x18; | |
10694 | /* Disable IRQ and imprecise data aborts. */ | |
10695 | mask = CPSR_A | CPSR_I; | |
10696 | offset = 4; | |
10697 | break; | |
10698 | case EXCP_VFIQ: | |
10699 | new_mode = ARM_CPU_MODE_FIQ; | |
10700 | addr = 0x1c; | |
10701 | /* Disable FIQ, IRQ and imprecise data aborts. */ | |
10702 | mask = CPSR_A | CPSR_I | CPSR_F; | |
10703 | offset = 4; | |
10704 | break; | |
10705 | case EXCP_VSERR: | |
10706 | { | |
10707 | /* | |
10708 | * Note that this is reported as a data abort, but the DFAR | |
10709 | * has an UNKNOWN value. Construct the SError syndrome from | |
10710 | * AET and ExT fields. | |
10711 | */ | |
10712 | ARMMMUFaultInfo fi = { .type = ARMFault_AsyncExternal, }; | |
10713 | ||
10714 | if (extended_addresses_enabled(env)) { | |
10715 | env->exception.fsr = arm_fi_to_lfsc(&fi); | |
10716 | } else { | |
10717 | env->exception.fsr = arm_fi_to_sfsc(&fi); | |
10718 | } | |
10719 | env->exception.fsr |= env->cp15.vsesr_el2 & 0xd000; | |
10720 | A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr); | |
10721 | qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x\n", | |
10722 | env->exception.fsr); | |
10723 | ||
10724 | new_mode = ARM_CPU_MODE_ABT; | |
10725 | addr = 0x10; | |
10726 | mask = CPSR_A | CPSR_I; | |
10727 | offset = 8; | |
10728 | } | |
10729 | break; | |
10730 | case EXCP_SMC: | |
10731 | new_mode = ARM_CPU_MODE_MON; | |
10732 | addr = 0x08; | |
10733 | mask = CPSR_A | CPSR_I | CPSR_F; | |
10734 | offset = 0; | |
10735 | break; | |
10736 | default: | |
10737 | cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); | |
10738 | return; /* Never happens. Keep compiler happy. */ | |
10739 | } | |
10740 | ||
10741 | if (new_mode == ARM_CPU_MODE_MON) { | |
10742 | addr += env->cp15.mvbar; | |
10743 | } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) { | |
10744 | /* High vectors. When enabled, base address cannot be remapped. */ | |
10745 | addr += 0xffff0000; | |
10746 | } else { | |
10747 | /* | |
10748 | * ARM v7 architectures provide a vector base address register to remap | |
10749 | * the interrupt vector table. | |
10750 | * This register is only honoured in non-monitor mode, and is banked. | |
10751 | * Note: only bits 31:5 are valid. | |
10752 | */ | |
10753 | addr += A32_BANKED_CURRENT_REG_GET(env, vbar); | |
10754 | } | |
10755 | ||
10756 | if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { | |
10757 | env->cp15.scr_el3 &= ~SCR_NS; | |
10758 | } | |
10759 | ||
10760 | take_aarch32_exception(env, new_mode, mask, offset, addr); | |
10761 | } | |
10762 | ||
10763 | static int aarch64_regnum(CPUARMState *env, int aarch32_reg) | |
10764 | { | |
10765 | /* | |
10766 | * Return the register number of the AArch64 view of the AArch32 | |
10767 | * register @aarch32_reg. The CPUARMState CPSR is assumed to still | |
10768 | * be that of the AArch32 mode the exception came from. | |
10769 | */ | |
10770 | int mode = env->uncached_cpsr & CPSR_M; | |
10771 | ||
10772 | switch (aarch32_reg) { | |
10773 | case 0 ... 7: | |
10774 | return aarch32_reg; | |
10775 | case 8 ... 12: | |
10776 | return mode == ARM_CPU_MODE_FIQ ? aarch32_reg + 16 : aarch32_reg; | |
10777 | case 13: | |
10778 | switch (mode) { | |
10779 | case ARM_CPU_MODE_USR: | |
10780 | case ARM_CPU_MODE_SYS: | |
10781 | return 13; | |
10782 | case ARM_CPU_MODE_HYP: | |
10783 | return 15; | |
10784 | case ARM_CPU_MODE_IRQ: | |
10785 | return 17; | |
10786 | case ARM_CPU_MODE_SVC: | |
10787 | return 19; | |
10788 | case ARM_CPU_MODE_ABT: | |
10789 | return 21; | |
10790 | case ARM_CPU_MODE_UND: | |
10791 | return 23; | |
10792 | case ARM_CPU_MODE_FIQ: | |
10793 | return 29; | |
10794 | default: | |
10795 | g_assert_not_reached(); | |
10796 | } | |
10797 | case 14: | |
10798 | switch (mode) { | |
10799 | case ARM_CPU_MODE_USR: | |
10800 | case ARM_CPU_MODE_SYS: | |
10801 | case ARM_CPU_MODE_HYP: | |
10802 | return 14; | |
10803 | case ARM_CPU_MODE_IRQ: | |
10804 | return 16; | |
10805 | case ARM_CPU_MODE_SVC: | |
10806 | return 18; | |
10807 | case ARM_CPU_MODE_ABT: | |
10808 | return 20; | |
10809 | case ARM_CPU_MODE_UND: | |
10810 | return 22; | |
10811 | case ARM_CPU_MODE_FIQ: | |
10812 | return 30; | |
10813 | default: | |
10814 | g_assert_not_reached(); | |
10815 | } | |
10816 | case 15: | |
10817 | return 31; | |
10818 | default: | |
10819 | g_assert_not_reached(); | |
10820 | } | |
10821 | } | |
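/*
 * For example, a register access trapped while the CPU was in AArch32 FIQ
 * mode with Rt = 13 refers to the FIQ banked SP, so this returns 29,
 * matching the xregs[] slot that aarch64_sync_32_to_64() fills with SP_fiq.
 */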
10822 | ||
10823 | static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env) | |
10824 | { | |
10825 | uint32_t ret = cpsr_read(env); | |
10826 | ||
10827 | /* Move DIT to the correct location for SPSR_ELx */ | |
10828 | if (ret & CPSR_DIT) { | |
10829 | ret &= ~CPSR_DIT; | |
10830 | ret |= PSTATE_DIT; | |
10831 | } | |
10832 | /* Merge PSTATE.SS into SPSR_ELx */ | |
10833 | ret |= env->pstate & PSTATE_SS; | |
10834 | ||
10835 | return ret; | |
10836 | } | |
10837 | ||
10838 | static bool syndrome_is_sync_extabt(uint32_t syndrome) | |
10839 | { | |
10840 | /* Return true if this syndrome value is a synchronous external abort */ | |
10841 | switch (syn_get_ec(syndrome)) { | |
10842 | case EC_INSNABORT: | |
10843 | case EC_INSNABORT_SAME_EL: | |
10844 | case EC_DATAABORT: | |
10845 | case EC_DATAABORT_SAME_EL: | |
10846 | /* Look at fault status code for all the synchronous ext abort cases */ | |
10847 | switch (syndrome & 0x3f) { | |
10848 | case 0x10: | |
10849 | case 0x13: | |
10850 | case 0x14: | |
10851 | case 0x15: | |
10852 | case 0x16: | |
10853 | case 0x17: | |
10854 | return true; | |
10855 | default: | |
10856 | return false; | |
10857 | } | |
10858 | default: | |
10859 | return false; | |
10860 | } | |
10861 | } | |
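/*
 * The fault status codes accepted above are the long-descriptor FSC
 * encodings for synchronous external aborts: 0x10 is an abort not on a
 * translation table walk, and 0x13..0x17 are aborts on a translation
 * table walk (level -1 through level 3).
 */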
10862 | ||
10863 | /* Handle exception entry to a target EL which is using AArch64 */ | |
10864 | static void arm_cpu_do_interrupt_aarch64(CPUState *cs) | |
10865 | { | |
10866 | ARMCPU *cpu = ARM_CPU(cs); | |
10867 | CPUARMState *env = &cpu->env; | |
10868 | unsigned int new_el = env->exception.target_el; | |
10869 | target_ulong addr = env->cp15.vbar_el[new_el]; | |
10870 | unsigned int new_mode = aarch64_pstate_mode(new_el, true); | |
10871 | unsigned int old_mode; | |
10872 | unsigned int cur_el = arm_current_el(env); | |
10873 | int rt; | |
10874 | ||
10875 | if (tcg_enabled()) { | |
10876 | /* | |
10877 | * Note that new_el can never be 0. If cur_el is 0, then | |
10878 | * el0_a64 is is_a64(), else el0_a64 is ignored. | |
10879 | */ | |
10880 | aarch64_sve_change_el(env, cur_el, new_el, is_a64(env)); | |
10881 | } | |
10882 | ||
10883 | if (cur_el < new_el) { | |
10884 | /* | |
10885 | * Entry vector offset depends on whether the implemented EL | |
10886 | * immediately lower than the target level is using AArch32 or AArch64 | |
10887 | */ | |
10888 | bool is_aa64; | |
10889 | uint64_t hcr; | |
10890 | ||
10891 | switch (new_el) { | |
10892 | case 3: | |
10893 | is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0; | |
10894 | break; | |
10895 | case 2: | |
10896 | hcr = arm_hcr_el2_eff(env); | |
10897 | if ((hcr & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { | |
10898 | is_aa64 = (hcr & HCR_RW) != 0; | |
10899 | break; | |
10900 | } | |
10901 | /* fall through */ | |
10902 | case 1: | |
10903 | is_aa64 = is_a64(env); | |
10904 | break; | |
10905 | default: | |
10906 | g_assert_not_reached(); | |
10907 | } | |
10908 | ||
10909 | if (is_aa64) { | |
10910 | addr += 0x400; | |
10911 | } else { | |
10912 | addr += 0x600; | |
10913 | } | |
10914 | } else if (pstate_read(env) & PSTATE_SP) { | |
10915 | addr += 0x200; | |
10916 | } | |
10917 | ||
10918 | switch (cs->exception_index) { | |
10919 | case EXCP_GPC: | |
10920 | qemu_log_mask(CPU_LOG_INT, "...with MFAR 0x%" PRIx64 "\n", | |
10921 | env->cp15.mfar_el3); | |
10922 | /* fall through */ | |
10923 | case EXCP_PREFETCH_ABORT: | |
10924 | case EXCP_DATA_ABORT: | |
10925 | /* | |
10926 | * FEAT_DoubleFault allows synchronous external aborts taken to EL3 | |
10927 | * to be taken to the SError vector entrypoint. | |
10928 | */ | |
10929 | if (new_el == 3 && (env->cp15.scr_el3 & SCR_EASE) && | |
10930 | syndrome_is_sync_extabt(env->exception.syndrome)) { | |
10931 | addr += 0x180; | |
10932 | } | |
10933 | env->cp15.far_el[new_el] = env->exception.vaddress; | |
10934 | qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n", | |
10935 | env->cp15.far_el[new_el]); | |
10936 | /* fall through */ | |
10937 | case EXCP_BKPT: | |
10938 | case EXCP_UDEF: | |
10939 | case EXCP_SWI: | |
10940 | case EXCP_HVC: | |
10941 | case EXCP_HYP_TRAP: | |
10942 | case EXCP_SMC: | |
10943 | switch (syn_get_ec(env->exception.syndrome)) { | |
10944 | case EC_ADVSIMDFPACCESSTRAP: | |
10945 | /* | |
10946 | * QEMU internal FP/SIMD syndromes from AArch32 include the | |
10947 | * TA and coproc fields which are only exposed if the exception | |
10948 | * is taken to AArch32 Hyp mode. Mask them out to get a valid | |
10949 | * AArch64 format syndrome. | |
10950 | */ | |
10951 | env->exception.syndrome &= ~MAKE_64BIT_MASK(0, 20); | |
10952 | break; | |
10953 | case EC_CP14RTTRAP: | |
10954 | case EC_CP15RTTRAP: | |
10955 | case EC_CP14DTTRAP: | |
10956 | /* | |
10957 | * For a trap on AArch32 MRC/MCR/LDC/STC the Rt field is currently | |
10958 | * the raw register field from the insn; when taking this to | |
10959 | * AArch64 we must convert it to the AArch64 view of the register | |
10960 | * number. Notice that we read a 4-bit AArch32 register number and | |
10961 | * write back a 5-bit AArch64 one. | |
10962 | */ | |
10963 | rt = extract32(env->exception.syndrome, 5, 4); | |
10964 | rt = aarch64_regnum(env, rt); | |
10965 | env->exception.syndrome = deposit32(env->exception.syndrome, | |
10966 | 5, 5, rt); | |
10967 | break; | |
10968 | case EC_CP15RRTTRAP: | |
10969 | case EC_CP14RRTTRAP: | |
10970 | /* Similarly for MRRC/MCRR traps for Rt and Rt2 fields */ | |
10971 | rt = extract32(env->exception.syndrome, 5, 4); | |
10972 | rt = aarch64_regnum(env, rt); | |
10973 | env->exception.syndrome = deposit32(env->exception.syndrome, | |
10974 | 5, 5, rt); | |
10975 | rt = extract32(env->exception.syndrome, 10, 4); | |
10976 | rt = aarch64_regnum(env, rt); | |
10977 | env->exception.syndrome = deposit32(env->exception.syndrome, | |
10978 | 10, 5, rt); | |
10979 | break; | |
10980 | } | |
10981 | env->cp15.esr_el[new_el] = env->exception.syndrome; | |
10982 | break; | |
10983 | case EXCP_IRQ: | |
10984 | case EXCP_VIRQ: | |
10985 | addr += 0x80; | |
10986 | break; | |
10987 | case EXCP_FIQ: | |
10988 | case EXCP_VFIQ: | |
10989 | addr += 0x100; | |
10990 | break; | |
10991 | case EXCP_VSERR: | |
10992 | addr += 0x180; | |
10993 | /* Construct the SError syndrome from IDS and ISS fields. */ | |
10994 | env->exception.syndrome = syn_serror(env->cp15.vsesr_el2 & 0x1ffffff); | |
10995 | env->cp15.esr_el[new_el] = env->exception.syndrome; | |
10996 | break; | |
10997 | default: | |
10998 | cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index); | |
10999 | } | |
11000 | ||
11001 | if (is_a64(env)) { | |
11002 | old_mode = pstate_read(env); | |
11003 | aarch64_save_sp(env, arm_current_el(env)); | |
11004 | env->elr_el[new_el] = env->pc; | |
11005 | } else { | |
11006 | old_mode = cpsr_read_for_spsr_elx(env); | |
11007 | env->elr_el[new_el] = env->regs[15]; | |
11008 | ||
11009 | aarch64_sync_32_to_64(env); | |
11010 | ||
11011 | env->condexec_bits = 0; | |
11012 | } | |
11013 | env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; | |
11014 | ||
11015 | qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", | |
11016 | env->elr_el[new_el]); | |
11017 | ||
11018 | if (cpu_isar_feature(aa64_pan, cpu)) { | |
11019 | /* The value of PSTATE.PAN is normally preserved, except when ... */ | |
11020 | new_mode |= old_mode & PSTATE_PAN; | |
11021 | switch (new_el) { | |
11022 | case 2: | |
11023 | /* ... the target is EL2 with HCR_EL2.{E2H,TGE} == '11' ... */ | |
11024 | if ((arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) | |
11025 | != (HCR_E2H | HCR_TGE)) { | |
11026 | break; | |
11027 | } | |
11028 | /* fall through */ | |
11029 | case 1: | |
11030 | /* ... the target is EL1 ... */ | |
11031 | /* ... and SCTLR_ELx.SPAN == 0, then set to 1. */ | |
11032 | if ((env->cp15.sctlr_el[new_el] & SCTLR_SPAN) == 0) { | |
11033 | new_mode |= PSTATE_PAN; | |
11034 | } | |
11035 | break; | |
11036 | } | |
11037 | } | |
11038 | if (cpu_isar_feature(aa64_mte, cpu)) { | |
11039 | new_mode |= PSTATE_TCO; | |
11040 | } | |
11041 | ||
11042 | if (cpu_isar_feature(aa64_ssbs, cpu)) { | |
11043 | if (env->cp15.sctlr_el[new_el] & SCTLR_DSSBS_64) { | |
11044 | new_mode |= PSTATE_SSBS; | |
11045 | } else { | |
11046 | new_mode &= ~PSTATE_SSBS; | |
11047 | } | |
11048 | } | |
11049 | ||
11050 | pstate_write(env, PSTATE_DAIF | new_mode); | |
11051 | env->aarch64 = true; | |
11052 | aarch64_restore_sp(env, new_el); | |
11053 | ||
11054 | if (tcg_enabled()) { | |
11055 | helper_rebuild_hflags_a64(env, new_el); | |
11056 | } | |
11057 | ||
11058 | env->pc = addr; | |
11059 | ||
11060 | qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", | |
11061 | new_el, env->pc, pstate_read(env)); | |
11062 | } | |
11063 | ||
11064 | /* | |
11065 | * Do semihosting call and set the appropriate return value. All the | |
11066 | * permission and validity checks have been done at translate time. | |
11067 | * | |
11068 |  * We only see semihosting exceptions in TCG, as they are not | |
11069 |  * trapped to the hypervisor in KVM. | |
11070 | */ | |
11071 | #ifdef CONFIG_TCG | |
11072 | static void tcg_handle_semihosting(CPUState *cs) | |
11073 | { | |
11074 | ARMCPU *cpu = ARM_CPU(cs); | |
11075 | CPUARMState *env = &cpu->env; | |
11076 | ||
11077 | if (is_a64(env)) { | |
11078 | qemu_log_mask(CPU_LOG_INT, | |
11079 | "...handling as semihosting call 0x%" PRIx64 "\n", | |
11080 | env->xregs[0]); | |
11081 | do_common_semihosting(cs); | |
11082 | env->pc += 4; | |
11083 | } else { | |
11084 | qemu_log_mask(CPU_LOG_INT, | |
11085 | "...handling as semihosting call 0x%x\n", | |
11086 | env->regs[0]); | |
11087 | do_common_semihosting(cs); | |
11088 | env->regs[15] += env->thumb ? 2 : 4; | |
11089 | } | |
11090 | } | |
11091 | #endif | |
11092 | ||
11093 | /* | |
11094 | * Handle a CPU exception for A and R profile CPUs. | |
11095 | * Do any appropriate logging, handle PSCI calls, and then hand off | |
11096 | * to the AArch64-entry or AArch32-entry function depending on the | |
11097 | * target exception level's register width. | |
11098 | * | |
11099 |  * Note: this is used both by TCG (as the do_interrupt tcg op) and | |
11100 |  *       by KVM, to re-inject guest debug exceptions and to | |
11101 |  *       inject a Synchronous External Abort. | |
11102 | */ | |
11103 | void arm_cpu_do_interrupt(CPUState *cs) | |
11104 | { | |
11105 | ARMCPU *cpu = ARM_CPU(cs); | |
11106 | CPUARMState *env = &cpu->env; | |
11107 | unsigned int new_el = env->exception.target_el; | |
11108 | ||
11109 | assert(!arm_feature(env, ARM_FEATURE_M)); | |
11110 | ||
11111 | arm_log_exception(cs); | |
11112 | qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env), | |
11113 | new_el); | |
11114 | if (qemu_loglevel_mask(CPU_LOG_INT) | |
11115 | && !excp_is_internal(cs->exception_index)) { | |
11116 | qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", | |
11117 | syn_get_ec(env->exception.syndrome), | |
11118 | env->exception.syndrome); | |
11119 | } | |
11120 | ||
11121 | if (tcg_enabled() && arm_is_psci_call(cpu, cs->exception_index)) { | |
11122 | arm_handle_psci_call(cpu); | |
11123 | qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n"); | |
11124 | return; | |
11125 | } | |
11126 | ||
11127 | /* | |
11128 | * Semihosting semantics depend on the register width of the code | |
11129 | * that caused the exception, not the target exception level, so | |
11130 | * must be handled here. | |
11131 | */ | |
11132 | #ifdef CONFIG_TCG | |
11133 | if (cs->exception_index == EXCP_SEMIHOST) { | |
11134 | tcg_handle_semihosting(cs); | |
11135 | return; | |
11136 | } | |
11137 | #endif | |
11138 | ||
11139 | /* | |
11140 |  * Hooks may change global state, so the BQL should be held; it | |
11141 |  * must also be held for any modification of | |
11142 |  * cs->interrupt_request. | |
11143 | */ | |
11144 | g_assert(qemu_mutex_iothread_locked()); | |
11145 | ||
11146 | arm_call_pre_el_change_hook(cpu); | |
11147 | ||
11148 | assert(!excp_is_internal(cs->exception_index)); | |
11149 | if (arm_el_is_aa64(env, new_el)) { | |
11150 | arm_cpu_do_interrupt_aarch64(cs); | |
11151 | } else { | |
11152 | arm_cpu_do_interrupt_aarch32(cs); | |
11153 | } | |
11154 | ||
11155 | arm_call_el_change_hook(cpu); | |
11156 | ||
11157 | if (!kvm_enabled()) { | |
11158 | cs->interrupt_request |= CPU_INTERRUPT_EXITTB; | |
11159 | } | |
11160 | } | |
11161 | #endif /* !CONFIG_USER_ONLY */ | |
11162 | ||
11163 | uint64_t arm_sctlr(CPUARMState *env, int el) | |
11164 | { | |
11165 | /* Only EL0 needs to be adjusted for EL1&0 or EL2&0. */ | |
11166 | if (el == 0) { | |
11167 | ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, 0); | |
11168 | el = mmu_idx == ARMMMUIdx_E20_0 ? 2 : 1; | |
11169 | } | |
11170 | return env->cp15.sctlr_el[el]; | |
11171 | } | |
11172 | ||
11173 | int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx) | |
11174 | { | |
11175 | if (regime_has_2_ranges(mmu_idx)) { | |
11176 | return extract64(tcr, 37, 2); | |
11177 | } else if (regime_is_stage2(mmu_idx)) { | |
11178 | return 0; /* VTCR_EL2 */ | |
11179 | } else { | |
11180 | /* Replicate the single TBI bit so we always have 2 bits. */ | |
11181 | return extract32(tcr, 20, 1) * 3; | |
11182 | } | |
11183 | } | |
11184 | ||
11185 | int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx) | |
11186 | { | |
11187 | if (regime_has_2_ranges(mmu_idx)) { | |
11188 | return extract64(tcr, 51, 2); | |
11189 | } else if (regime_is_stage2(mmu_idx)) { | |
11190 | return 0; /* VTCR_EL2 */ | |
11191 | } else { | |
11192 | /* Replicate the single TBID bit so we always have 2 bits. */ | |
11193 | return extract32(tcr, 29, 1) * 3; | |
11194 | } | |
11195 | } | |
11196 | ||
11197 | int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx) | |
11198 | { | |
11199 | if (regime_has_2_ranges(mmu_idx)) { | |
11200 | return extract64(tcr, 57, 2); | |
11201 | } else { | |
11202 | /* Replicate the single TCMA bit so we always have 2 bits. */ | |
11203 | return extract32(tcr, 30, 1) * 3; | |
11204 | } | |
11205 | } | |
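/*
 * Each of the three accessors above returns a two-bit field: bit 0
 * applies to the lower (TTBR0) VA range and bit 1 to the upper (TTBR1)
 * range, with single-range regimes having their one control bit
 * replicated into both positions.  Callers can therefore uniformly
 * select the relevant bit from VA bit 55, for example:
 *
 *     int tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
 *     bool tbi_for_va = (tbi >> extract64(va, 55, 1)) & 1;
 *
 * which is what aa64_va_parameters() does below.
 */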
11206 | ||
11207 | static ARMGranuleSize tg0_to_gran_size(int tg) | |
11208 | { | |
11209 | switch (tg) { | |
11210 | case 0: | |
11211 | return Gran4K; | |
11212 | case 1: | |
11213 | return Gran64K; | |
11214 | case 2: | |
11215 | return Gran16K; | |
11216 | default: | |
11217 | return GranInvalid; | |
11218 | } | |
11219 | } | |
11220 | ||
11221 | static ARMGranuleSize tg1_to_gran_size(int tg) | |
11222 | { | |
11223 | switch (tg) { | |
11224 | case 1: | |
11225 | return Gran16K; | |
11226 | case 2: | |
11227 | return Gran4K; | |
11228 | case 3: | |
11229 | return Gran64K; | |
11230 | default: | |
11231 | return GranInvalid; | |
11232 | } | |
11233 | } | |
11234 | ||
11235 | static inline bool have4k(ARMCPU *cpu, bool stage2) | |
11236 | { | |
11237 | return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu) | |
11238 | : cpu_isar_feature(aa64_tgran4, cpu); | |
11239 | } | |
11240 | ||
11241 | static inline bool have16k(ARMCPU *cpu, bool stage2) | |
11242 | { | |
11243 | return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu) | |
11244 | : cpu_isar_feature(aa64_tgran16, cpu); | |
11245 | } | |
11246 | ||
11247 | static inline bool have64k(ARMCPU *cpu, bool stage2) | |
11248 | { | |
11249 | return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu) | |
11250 | : cpu_isar_feature(aa64_tgran64, cpu); | |
11251 | } | |
11252 | ||
11253 | static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran, | |
11254 | bool stage2) | |
11255 | { | |
11256 | switch (gran) { | |
11257 | case Gran4K: | |
11258 | if (have4k(cpu, stage2)) { | |
11259 | return gran; | |
11260 | } | |
11261 | break; | |
11262 | case Gran16K: | |
11263 | if (have16k(cpu, stage2)) { | |
11264 | return gran; | |
11265 | } | |
11266 | break; | |
11267 | case Gran64K: | |
11268 | if (have64k(cpu, stage2)) { | |
11269 | return gran; | |
11270 | } | |
11271 | break; | |
11272 | case GranInvalid: | |
11273 | break; | |
11274 | } | |
11275 | /* | |
11276 | * If the guest selects a granule size that isn't implemented, | |
11277 | * the architecture requires that we behave as if it selected one | |
11278 | * that is (with an IMPDEF choice of which one to pick). We choose | |
11279 | * to implement the smallest supported granule size. | |
11280 | */ | |
11281 | if (have4k(cpu, stage2)) { | |
11282 | return Gran4K; | |
11283 | } | |
11284 | if (have16k(cpu, stage2)) { | |
11285 | return Gran16K; | |
11286 | } | |
11287 | assert(have64k(cpu, stage2)); | |
11288 | return Gran64K; | |
11289 | } | |
11290 | ||
11291 | ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, | |
11292 | ARMMMUIdx mmu_idx, bool data, | |
11293 | bool el1_is_aa32) | |
11294 | { | |
11295 | uint64_t tcr = regime_tcr(env, mmu_idx); | |
11296 | bool epd, hpd, tsz_oob, ds, ha, hd; | |
11297 | int select, tsz, tbi, max_tsz, min_tsz, ps, sh; | |
11298 | ARMGranuleSize gran; | |
11299 | ARMCPU *cpu = env_archcpu(env); | |
11300 | bool stage2 = regime_is_stage2(mmu_idx); | |
11301 | ||
11302 | if (!regime_has_2_ranges(mmu_idx)) { | |
11303 | select = 0; | |
11304 | tsz = extract32(tcr, 0, 6); | |
11305 | gran = tg0_to_gran_size(extract32(tcr, 14, 2)); | |
11306 | if (stage2) { | |
11307 | /* VTCR_EL2 */ | |
11308 | hpd = false; | |
11309 | } else { | |
11310 | hpd = extract32(tcr, 24, 1); | |
11311 | } | |
11312 | epd = false; | |
11313 | sh = extract32(tcr, 12, 2); | |
11314 | ps = extract32(tcr, 16, 3); | |
11315 | ha = extract32(tcr, 21, 1) && cpu_isar_feature(aa64_hafs, cpu); | |
11316 | hd = extract32(tcr, 22, 1) && cpu_isar_feature(aa64_hdbs, cpu); | |
11317 | ds = extract64(tcr, 32, 1); | |
11318 | } else { | |
11319 | bool e0pd; | |
11320 | ||
11321 | /* | |
11322 | * Bit 55 is always between the two regions, and is canonical for | |
11323 | * determining if address tagging is enabled. | |
11324 | */ | |
11325 | select = extract64(va, 55, 1); | |
11326 | if (!select) { | |
11327 | tsz = extract32(tcr, 0, 6); | |
11328 | gran = tg0_to_gran_size(extract32(tcr, 14, 2)); | |
11329 | epd = extract32(tcr, 7, 1); | |
11330 | sh = extract32(tcr, 12, 2); | |
11331 | hpd = extract64(tcr, 41, 1); | |
11332 | e0pd = extract64(tcr, 55, 1); | |
11333 | } else { | |
11334 | tsz = extract32(tcr, 16, 6); | |
11335 | gran = tg1_to_gran_size(extract32(tcr, 30, 2)); | |
11336 | epd = extract32(tcr, 23, 1); | |
11337 | sh = extract32(tcr, 28, 2); | |
11338 | hpd = extract64(tcr, 42, 1); | |
11339 | e0pd = extract64(tcr, 56, 1); | |
11340 | } | |
11341 | ps = extract64(tcr, 32, 3); | |
11342 | ha = extract64(tcr, 39, 1) && cpu_isar_feature(aa64_hafs, cpu); | |
11343 | hd = extract64(tcr, 40, 1) && cpu_isar_feature(aa64_hdbs, cpu); | |
11344 | ds = extract64(tcr, 59, 1); | |
11345 | ||
11346 | if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) && | |
11347 | regime_is_user(env, mmu_idx)) { | |
11348 | epd = true; | |
11349 | } | |
11350 | } | |
11351 | ||
11352 | gran = sanitize_gran_size(cpu, gran, stage2); | |
11353 | ||
11354 | if (cpu_isar_feature(aa64_st, cpu)) { | |
11355 | max_tsz = 48 - (gran == Gran64K); | |
11356 | } else { | |
11357 | max_tsz = 39; | |
11358 | } | |
11359 | ||
11360 | /* | |
11361 | * DS is RES0 unless FEAT_LPA2 is supported for the given page size; | |
11362 | * adjust the effective value of DS, as documented. | |
11363 | */ | |
11364 | min_tsz = 16; | |
11365 | if (gran == Gran64K) { | |
11366 | if (cpu_isar_feature(aa64_lva, cpu)) { | |
11367 | min_tsz = 12; | |
11368 | } | |
11369 | ds = false; | |
11370 | } else if (ds) { | |
11371 | if (regime_is_stage2(mmu_idx)) { | |
11372 | if (gran == Gran16K) { | |
11373 | ds = cpu_isar_feature(aa64_tgran16_2_lpa2, cpu); | |
11374 | } else { | |
11375 | ds = cpu_isar_feature(aa64_tgran4_2_lpa2, cpu); | |
11376 | } | |
11377 | } else { | |
11378 | if (gran == Gran16K) { | |
11379 | ds = cpu_isar_feature(aa64_tgran16_lpa2, cpu); | |
11380 | } else { | |
11381 | ds = cpu_isar_feature(aa64_tgran4_lpa2, cpu); | |
11382 | } | |
11383 | } | |
11384 | if (ds) { | |
11385 | min_tsz = 12; | |
11386 | } | |
11387 | } | |
11388 | ||
11389 | if (stage2 && el1_is_aa32) { | |
11390 | /* | |
11391 | * For AArch32 EL1 the min txsz (and thus max IPA size) requirements | |
11392 | * are loosened: a configured IPA of 40 bits is permitted even if | |
11393 | * the implemented PA is less than that (and so a 40 bit IPA would | |
11394 | * fault for an AArch64 EL1). See R_DTLMN. | |
11395 | */ | |
11396 | min_tsz = MIN(min_tsz, 24); | |
11397 | } | |
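    /*
     * TxSZ encodes the input address range as (64 - TxSZ) bits, so e.g.
     * tsz == 16 selects a 48-bit VA space and tsz == 25 a 39-bit one.
     * Out-of-range values are clamped here and the condition recorded in
     * tsz_oob so that the page-table walker can handle it.
     */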
11398 | ||
11399 | if (tsz > max_tsz) { | |
11400 | tsz = max_tsz; | |
11401 | tsz_oob = true; | |
11402 | } else if (tsz < min_tsz) { | |
11403 | tsz = min_tsz; | |
11404 | tsz_oob = true; | |
11405 | } else { | |
11406 | tsz_oob = false; | |
11407 | } | |
11408 | ||
11409 | /* Present TBI as a composite with TBID. */ | |
11410 | tbi = aa64_va_parameter_tbi(tcr, mmu_idx); | |
11411 | if (!data) { | |
11412 | tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); | |
11413 | } | |
11414 | tbi = (tbi >> select) & 1; | |
11415 | ||
11416 | return (ARMVAParameters) { | |
11417 | .tsz = tsz, | |
11418 | .ps = ps, | |
11419 | .sh = sh, | |
11420 | .select = select, | |
11421 | .tbi = tbi, | |
11422 | .epd = epd, | |
11423 | .hpd = hpd, | |
11424 | .tsz_oob = tsz_oob, | |
11425 | .ds = ds, | |
11426 | .ha = ha, | |
11427 | .hd = ha && hd, | |
11428 | .gran = gran, | |
11429 | }; | |
11430 | } | |
11431 | ||
11432 | /* | |
11433 | * Note that signed overflow is undefined in C. The following routines are | |
11434 | * careful to use unsigned types where modulo arithmetic is required. | |
11435 | * Failure to do so _will_ break on newer gcc. | |
11436 | */ | |
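/*
 * For example, the helpers below form the sum or difference in an
 * unsigned type and only then test for saturation: conversion to an
 * unsigned type wraps modulo 2^N by definition, whereas the equivalent
 * signed overflow (e.g. on an int32_t) is undefined behaviour that a
 * modern compiler is entitled to assume never happens.
 */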
11437 | ||
11438 | /* Signed saturating arithmetic. */ | |
11439 | ||
11440 | /* Perform 16-bit signed saturating addition. */ | |
11441 | static inline uint16_t add16_sat(uint16_t a, uint16_t b) | |
11442 | { | |
11443 | uint16_t res; | |
11444 | ||
11445 | res = a + b; | |
11446 | if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) { | |
11447 | if (a & 0x8000) { | |
11448 | res = 0x8000; | |
11449 | } else { | |
11450 | res = 0x7fff; | |
11451 | } | |
11452 | } | |
11453 | return res; | |
11454 | } | |
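/*
 * The check above is the standard two's-complement overflow test:
 * overflow is only possible when the operands have the same sign, and
 * has happened when the result's sign differs from theirs.  The result
 * then saturates to 0x8000 (INT16_MIN) for negative operands or 0x7fff
 * (INT16_MAX) for positive ones.  The subtraction helpers below apply
 * the same idea with the operand-sign test inverted.
 */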
11455 | ||
11456 | /* Perform 8-bit signed saturating addition. */ | |
11457 | static inline uint8_t add8_sat(uint8_t a, uint8_t b) | |
11458 | { | |
11459 | uint8_t res; | |
11460 | ||
11461 | res = a + b; | |
11462 | if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) { | |
11463 | if (a & 0x80) { | |
11464 | res = 0x80; | |
11465 | } else { | |
11466 | res = 0x7f; | |
11467 | } | |
11468 | } | |
11469 | return res; | |
11470 | } | |
11471 | ||
11472 | /* Perform 16-bit signed saturating subtraction. */ | |
11473 | static inline uint16_t sub16_sat(uint16_t a, uint16_t b) | |
11474 | { | |
11475 | uint16_t res; | |
11476 | ||
11477 | res = a - b; | |
11478 | if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) { | |
11479 | if (a & 0x8000) { | |
11480 | res = 0x8000; | |
11481 | } else { | |
11482 | res = 0x7fff; | |
11483 | } | |
11484 | } | |
11485 | return res; | |
11486 | } | |
11487 | ||
11488 | /* Perform 8-bit signed saturating subtraction. */ | |
11489 | static inline uint8_t sub8_sat(uint8_t a, uint8_t b) | |
11490 | { | |
11491 | uint8_t res; | |
11492 | ||
11493 | res = a - b; | |
11494 | if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) { | |
11495 | if (a & 0x80) { | |
11496 | res = 0x80; | |
11497 | } else { | |
11498 | res = 0x7f; | |
11499 | } | |
11500 | } | |
11501 | return res; | |
11502 | } | |
11503 | ||
11504 | #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16); | |
11505 | #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16); | |
11506 | #define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8); | |
11507 | #define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8); | |
11508 | #define PFX q | |
11509 | ||
11510 | #include "op_addsub.h" | |
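/*
 * op_addsub.h is used as a template: it is included several times in
 * this file, each time with different ADD16/SUB16/ADD8/SUB8 and PFX
 * definitions, to instantiate the whole family of parallel add/subtract
 * helpers (signed saturating "q" here, then unsigned saturating "uq",
 * signed "s", unsigned "u", and the halving "sh"/"uh" variants below).
 * The header presumably #undefs these macros after use so that each
 * inclusion starts from a clean slate.
 */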
11511 | ||
11512 | /* Unsigned saturating arithmetic. */ | |
11513 | static inline uint16_t add16_usat(uint16_t a, uint16_t b) | |
11514 | { | |
11515 | uint16_t res; | |
11516 | res = a + b; | |
11517 | if (res < a) { | |
11518 | res = 0xffff; | |
11519 | } | |
11520 | return res; | |
11521 | } | |
11522 | ||
11523 | static inline uint16_t sub16_usat(uint16_t a, uint16_t b) | |
11524 | { | |
11525 | if (a > b) { | |
11526 | return a - b; | |
11527 | } else { | |
11528 | return 0; | |
11529 | } | |
11530 | } | |
11531 | ||
11532 | static inline uint8_t add8_usat(uint8_t a, uint8_t b) | |
11533 | { | |
11534 | uint8_t res; | |
11535 | res = a + b; | |
11536 | if (res < a) { | |
11537 | res = 0xff; | |
11538 | } | |
11539 | return res; | |
11540 | } | |
11541 | ||
11542 | static inline uint8_t sub8_usat(uint8_t a, uint8_t b) | |
11543 | { | |
11544 | if (a > b) { | |
11545 | return a - b; | |
11546 | } else { | |
11547 | return 0; | |
11548 | } | |
11549 | } | |
11550 | ||
11551 | #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16); | |
11552 | #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16); | |
11553 | #define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8); | |
11554 | #define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8); | |
11555 | #define PFX uq | |
11556 | ||
11557 | #include "op_addsub.h" | |
11558 | ||
11559 | /* Signed modulo arithmetic. */ | |
11560 | #define SARITH16(a, b, n, op) do { \ | |
11561 | int32_t sum; \ | |
11562 | sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \ | |
11563 | RESULT(sum, n, 16); \ | |
11564 | if (sum >= 0) \ | |
11565 | ge |= 3 << (n * 2); \ | |
11566 | } while (0) | |
11567 | ||
11568 | #define SARITH8(a, b, n, op) do { \ | |
11569 | int32_t sum; \ | |
11570 | sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \ | |
11571 | RESULT(sum, n, 8); \ | |
11572 | if (sum >= 0) \ | |
11573 | ge |= 1 << n; \ | |
11574 | } while (0) | |
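/*
 * Unlike the saturating variants, these also compute the CPSR.GE bits
 * consumed by the SEL instruction (see HELPER(sel_flags) below): the
 * signed forms set the GE bits for a lane when its unsaturated result is
 * >= 0, and the unsigned forms further down set them on carry-out for
 * additions and on the absence of a borrow for subtractions.  The 'ge'
 * variable they update is presumably declared by op_addsub.h when
 * ARITH_GE is defined.
 */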
11575 | ||
11576 | ||
11577 | #define ADD16(a, b, n) SARITH16(a, b, n, +) | |
11578 | #define SUB16(a, b, n) SARITH16(a, b, n, -) | |
11579 | #define ADD8(a, b, n) SARITH8(a, b, n, +) | |
11580 | #define SUB8(a, b, n) SARITH8(a, b, n, -) | |
11581 | #define PFX s | |
11582 | #define ARITH_GE | |
11583 | ||
11584 | #include "op_addsub.h" | |
11585 | ||
11586 | /* Unsigned modulo arithmetic. */ | |
11587 | #define ADD16(a, b, n) do { \ | |
11588 | uint32_t sum; \ | |
11589 | sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \ | |
11590 | RESULT(sum, n, 16); \ | |
11591 | if ((sum >> 16) == 1) \ | |
11592 | ge |= 3 << (n * 2); \ | |
11593 | } while (0) | |
11594 | ||
11595 | #define ADD8(a, b, n) do { \ | |
11596 | uint32_t sum; \ | |
11597 | sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \ | |
11598 | RESULT(sum, n, 8); \ | |
11599 | if ((sum >> 8) == 1) \ | |
11600 | ge |= 1 << n; \ | |
11601 | } while (0) | |
11602 | ||
11603 | #define SUB16(a, b, n) do { \ | |
11604 | uint32_t sum; \ | |
11605 | sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \ | |
11606 | RESULT(sum, n, 16); \ | |
11607 | if ((sum >> 16) == 0) \ | |
11608 | ge |= 3 << (n * 2); \ | |
11609 | } while (0) | |
11610 | ||
11611 | #define SUB8(a, b, n) do { \ | |
11612 | uint32_t sum; \ | |
11613 | sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \ | |
11614 | RESULT(sum, n, 8); \ | |
11615 | if ((sum >> 8) == 0) \ | |
11616 | ge |= 1 << n; \ | |
11617 | } while (0) | |
11618 | ||
11619 | #define PFX u | |
11620 | #define ARITH_GE | |
11621 | ||
11622 | #include "op_addsub.h" | |
11623 | ||
11624 | /* Halved signed arithmetic. */ | |
11625 | #define ADD16(a, b, n) \ | |
11626 | RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16) | |
11627 | #define SUB16(a, b, n) \ | |
11628 | RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16) | |
11629 | #define ADD8(a, b, n) \ | |
11630 | RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8) | |
11631 | #define SUB8(a, b, n) \ | |
11632 | RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8) | |
11633 | #define PFX sh | |
11634 | ||
11635 | #include "op_addsub.h" | |
11636 | ||
11637 | /* Halved unsigned arithmetic. */ | |
11638 | #define ADD16(a, b, n) \ | |
11639 | RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16) | |
11640 | #define SUB16(a, b, n) \ | |
11641 | RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16) | |
11642 | #define ADD8(a, b, n) \ | |
11643 | RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8) | |
11644 | #define SUB8(a, b, n) \ | |
11645 | RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8) | |
11646 | #define PFX uh | |
11647 | ||
11648 | #include "op_addsub.h" | |
11649 | ||
11650 | static inline uint8_t do_usad(uint8_t a, uint8_t b) | |
11651 | { | |
11652 | if (a > b) { | |
11653 | return a - b; | |
11654 | } else { | |
11655 | return b - a; | |
11656 | } | |
11657 | } | |
11658 | ||
11659 | /* Unsigned sum of absolute byte differences. */ | |
11660 | uint32_t HELPER(usad8)(uint32_t a, uint32_t b) | |
11661 | { | |
11662 | uint32_t sum; | |
11663 | sum = do_usad(a, b); | |
11664 | sum += do_usad(a >> 8, b >> 8); | |
11665 | sum += do_usad(a >> 16, b >> 16); | |
11666 | sum += do_usad(a >> 24, b >> 24); | |
11667 | return sum; | |
11668 | } | |
11669 | ||
11670 | /* For ARMv6 SEL instruction. */ | |
11671 | uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b) | |
11672 | { | |
11673 | uint32_t mask; | |
11674 | ||
11675 | mask = 0; | |
11676 | if (flags & 1) { | |
11677 | mask |= 0xff; | |
11678 | } | |
11679 | if (flags & 2) { | |
11680 | mask |= 0xff00; | |
11681 | } | |
11682 | if (flags & 4) { | |
11683 | mask |= 0xff0000; | |
11684 | } | |
11685 | if (flags & 8) { | |
11686 | mask |= 0xff000000; | |
11687 | } | |
11688 | return (a & mask) | (b & ~mask); | |
11689 | } | |
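/*
 * Example: with flags (the GE bits) == 0b0101, bytes 0 and 2 of the
 * result are taken from 'a' and bytes 1 and 3 from 'b', which is the
 * per-byte selection SEL performs based on the GE flags set by the
 * parallel add/subtract helpers above.
 */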
11690 | ||
11691 | /* | |
11692 | * CRC helpers. | |
11693 | * The upper bytes of val (above the number specified by 'bytes') must have | |
11694 | * been zeroed out by the caller. | |
11695 | */ | |
11696 | uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes) | |
11697 | { | |
11698 | uint8_t buf[4]; | |
11699 | ||
11700 | stl_le_p(buf, val); | |
11701 | ||
11702 | /* zlib crc32 converts the accumulator and output to one's complement. */ | |
11703 | return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff; | |
11704 | } | |
11705 | ||
11706 | uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes) | |
11707 | { | |
11708 | uint8_t buf[4]; | |
11709 | ||
11710 | stl_le_p(buf, val); | |
11711 | ||
11712 | /* Linux crc32c converts the output to one's complement. */ | |
11713 | return crc32c(acc, buf, bytes) ^ 0xffffffff; | |
11714 | } | |
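/*
 * The architectural CRC32 and CRC32C instructions are defined without
 * the implicit one's-complement steps, so the helpers above cancel out
 * the inversions that zlib's crc32() (accumulator and output) and
 * crc32c() (output only) perform, leaving the raw accumulator value the
 * instructions expect.
 */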
11715 | ||
11716 | /* | |
11717 | * Return the exception level to which FP-disabled exceptions should | |
11718 | * be taken, or 0 if FP is enabled. | |
11719 | */ | |
11720 | int fp_exception_el(CPUARMState *env, int cur_el) | |
11721 | { | |
11722 | #ifndef CONFIG_USER_ONLY | |
11723 | uint64_t hcr_el2; | |
11724 | ||
11725 | /* | |
11726 | * CPACR and the CPTR registers don't exist before v6, so FP is | |
11727 | * always accessible | |
11728 | */ | |
11729 | if (!arm_feature(env, ARM_FEATURE_V6)) { | |
11730 | return 0; | |
11731 | } | |
11732 | ||
11733 | if (arm_feature(env, ARM_FEATURE_M)) { | |
11734 | /* CPACR can cause a NOCP UsageFault taken to current security state */ | |
11735 | if (!v7m_cpacr_pass(env, env->v7m.secure, cur_el != 0)) { | |
11736 | return 1; | |
11737 | } | |
11738 | ||
11739 | if (arm_feature(env, ARM_FEATURE_M_SECURITY) && !env->v7m.secure) { | |
11740 | if (!extract32(env->v7m.nsacr, 10, 1)) { | |
11741 | /* FP insns cause a NOCP UsageFault taken to Secure */ | |
11742 | return 3; | |
11743 | } | |
11744 | } | |
11745 | ||
11746 | return 0; | |
11747 | } | |
11748 | ||
11749 | hcr_el2 = arm_hcr_el2_eff(env); | |
11750 | ||
11751 | /* | |
11752 | * The CPACR controls traps to EL1, or PL1 if we're 32 bit: | |
11753 | * 0, 2 : trap EL0 and EL1/PL1 accesses | |
11754 | * 1 : trap only EL0 accesses | |
11755 | * 3 : trap no accesses | |
11756 | * This register is ignored if E2H+TGE are both set. | |
11757 | */ | |
11758 | if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { | |
11759 | int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN); | |
11760 | ||
11761 | switch (fpen) { | |
11762 | case 1: | |
11763 | if (cur_el != 0) { | |
11764 | break; | |
11765 | } | |
11766 | /* fall through */ | |
11767 | case 0: | |
11768 | case 2: | |
11769 | /* Trap from Secure PL0 or PL1 to Secure PL1. */ | |
11770 | if (!arm_el_is_aa64(env, 3) | |
11771 | && (cur_el == 3 || arm_is_secure_below_el3(env))) { | |
11772 | return 3; | |
11773 | } | |
11774 | if (cur_el <= 1) { | |
11775 | return 1; | |
11776 | } | |
11777 | break; | |
11778 | } | |
11779 | } | |
11780 | ||
11781 | /* | |
11782 | * The NSACR allows A-profile AArch32 EL3 and M-profile secure mode | |
11783 | * to control non-secure access to the FPU. It doesn't have any | |
11784 | * effect if EL3 is AArch64 or if EL3 doesn't exist at all. | |
11785 | */ | |
11786 | if ((arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) && | |
11787 | cur_el <= 2 && !arm_is_secure_below_el3(env))) { | |
11788 | if (!extract32(env->cp15.nsacr, 10, 1)) { | |
11789 | /* FP insns act as UNDEF */ | |
11790 | return cur_el == 2 ? 2 : 1; | |
11791 | } | |
11792 | } | |
11793 | ||
11794 | /* | |
11795 | * CPTR_EL2 is present in v7VE or v8, and changes format | |
11796 | * with HCR_EL2.E2H (regardless of TGE). | |
11797 | */ | |
11798 | if (cur_el <= 2) { | |
11799 | if (hcr_el2 & HCR_E2H) { | |
11800 | switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) { | |
11801 | case 1: | |
11802 | if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) { | |
11803 | break; | |
11804 | } | |
11805 | /* fall through */ | |
11806 | case 0: | |
11807 | case 2: | |
11808 | return 2; | |
11809 | } | |
11810 | } else if (arm_is_el2_enabled(env)) { | |
11811 | if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) { | |
11812 | return 2; | |
11813 | } | |
11814 | } | |
11815 | } | |
11816 | ||
11817 | /* CPTR_EL3 : present in v8 */ | |
11818 | if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) { | |
11819 | /* Trap all FP ops to EL3 */ | |
11820 | return 3; | |
11821 | } | |
11822 | #endif | |
11823 | return 0; | |
11824 | } | |
11825 | ||
11826 | /* Return the exception level we're running at if this is our mmu_idx */ | |
11827 | int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) | |
11828 | { | |
11829 | if (mmu_idx & ARM_MMU_IDX_M) { | |
11830 | return mmu_idx & ARM_MMU_IDX_M_PRIV; | |
11831 | } | |
11832 | ||
11833 | switch (mmu_idx) { | |
11834 | case ARMMMUIdx_E10_0: | |
11835 | case ARMMMUIdx_E20_0: | |
11836 | return 0; | |
11837 | case ARMMMUIdx_E10_1: | |
11838 | case ARMMMUIdx_E10_1_PAN: | |
11839 | return 1; | |
11840 | case ARMMMUIdx_E2: | |
11841 | case ARMMMUIdx_E20_2: | |
11842 | case ARMMMUIdx_E20_2_PAN: | |
11843 | return 2; | |
11844 | case ARMMMUIdx_E3: | |
11845 | return 3; | |
11846 | default: | |
11847 | g_assert_not_reached(); | |
11848 | } | |
11849 | } | |
11850 | ||
11851 | #ifndef CONFIG_TCG | |
11852 | ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) | |
11853 | { | |
11854 | g_assert_not_reached(); | |
11855 | } | |
11856 | #endif | |
11857 | ||
11858 | static bool arm_pan_enabled(CPUARMState *env) | |
11859 | { | |
11860 | if (is_a64(env)) { | |
11861 | return env->pstate & PSTATE_PAN; | |
11862 | } else { | |
11863 | return env->uncached_cpsr & CPSR_PAN; | |
11864 | } | |
11865 | } | |
11866 | ||
11867 | ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) | |
11868 | { | |
11869 | ARMMMUIdx idx; | |
11870 | uint64_t hcr; | |
11871 | ||
11872 | if (arm_feature(env, ARM_FEATURE_M)) { | |
11873 | return arm_v7m_mmu_idx_for_secstate(env, env->v7m.secure); | |
11874 | } | |
11875 | ||
11876 | /* See ARM pseudo-function ELIsInHost. */ | |
11877 | switch (el) { | |
11878 | case 0: | |
11879 | hcr = arm_hcr_el2_eff(env); | |
11880 | if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { | |
11881 | idx = ARMMMUIdx_E20_0; | |
11882 | } else { | |
11883 | idx = ARMMMUIdx_E10_0; | |
11884 | } | |
11885 | break; | |
11886 | case 1: | |
11887 | if (arm_pan_enabled(env)) { | |
11888 | idx = ARMMMUIdx_E10_1_PAN; | |
11889 | } else { | |
11890 | idx = ARMMMUIdx_E10_1; | |
11891 | } | |
11892 | break; | |
11893 | case 2: | |
11894 | /* Note that TGE does not apply at EL2. */ | |
11895 | if (arm_hcr_el2_eff(env) & HCR_E2H) { | |
11896 | if (arm_pan_enabled(env)) { | |
11897 | idx = ARMMMUIdx_E20_2_PAN; | |
11898 | } else { | |
11899 | idx = ARMMMUIdx_E20_2; | |
11900 | } | |
11901 | } else { | |
11902 | idx = ARMMMUIdx_E2; | |
11903 | } | |
11904 | break; | |
11905 | case 3: | |
11906 | return ARMMMUIdx_E3; | |
11907 | default: | |
11908 | g_assert_not_reached(); | |
11909 | } | |
11910 | ||
11911 | return idx; | |
11912 | } | |
11913 | ||
11914 | ARMMMUIdx arm_mmu_idx(CPUARMState *env) | |
11915 | { | |
11916 | return arm_mmu_idx_el(env, arm_current_el(env)); | |
11917 | } | |
11918 | ||
11919 | static bool mve_no_pred(CPUARMState *env) | |
11920 | { | |
11921 | /* | |
11922 | * Return true if there is definitely no predication of MVE | |
11923 | * instructions by VPR or LTPSIZE. (Returning false even if there | |
11924 | * isn't any predication is OK; generated code will just be | |
11925 | * a little worse.) | |
11926 | * If the CPU does not implement MVE then this TB flag is always 0. | |
11927 | * | |
11928 | * NOTE: if you change this logic, the "recalculate s->mve_no_pred" | |
11929 | * logic in gen_update_fp_context() needs to be updated to match. | |
11930 | * | |
11931 | * We do not include the effect of the ECI bits here -- they are | |
11932 | * tracked in other TB flags. This simplifies the logic for | |
11933 | * "when did we emit code that changes the MVE_NO_PRED TB flag | |
11934 | * and thus need to end the TB?". | |
11935 | */ | |
11936 | if (cpu_isar_feature(aa32_mve, env_archcpu(env))) { | |
11937 | return false; | |
11938 | } | |
11939 | if (env->v7m.vpr) { | |
11940 | return false; | |
11941 | } | |
11942 | if (env->v7m.ltpsize < 4) { | |
11943 | return false; | |
11944 | } | |
11945 | return true; | |
11946 | } | |
11947 | ||
11948 | void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc, | |
11949 | target_ulong *cs_base, uint32_t *pflags) | |
11950 | { | |
11951 | CPUARMTBFlags flags; | |
11952 | ||
11953 | assert_hflags_rebuild_correctly(env); | |
11954 | flags = env->hflags; | |
11955 | ||
11956 | if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) { | |
11957 | *pc = env->pc; | |
11958 | if (cpu_isar_feature(aa64_bti, env_archcpu(env))) { | |
11959 | DP_TBFLAG_A64(flags, BTYPE, env->btype); | |
11960 | } | |
11961 | } else { | |
11962 | *pc = env->regs[15]; | |
11963 | ||
11964 | if (arm_feature(env, ARM_FEATURE_M)) { | |
11965 | if (arm_feature(env, ARM_FEATURE_M_SECURITY) && | |
11966 | FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) | |
11967 | != env->v7m.secure) { | |
11968 | DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1); | |
11969 | } | |
11970 | ||
11971 | if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) && | |
11972 | (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) || | |
11973 | (env->v7m.secure && | |
11974 | !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) { | |
11975 | /* | |
11976 | * ASPEN is set, but FPCA/SFPA indicate that there is no | |
11977 | * active FP context; we must create a new FP context before | |
11978 | * executing any FP insn. | |
11979 | */ | |
11980 | DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1); | |
11981 | } | |
11982 | ||
11983 | bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; | |
11984 | if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) { | |
11985 | DP_TBFLAG_M32(flags, LSPACT, 1); | |
11986 | } | |
11987 | ||
11988 | if (mve_no_pred(env)) { | |
11989 | DP_TBFLAG_M32(flags, MVE_NO_PRED, 1); | |
11990 | } | |
11991 | } else { | |
11992 | /* | |
11993 | * Note that XSCALE_CPAR shares bits with VECSTRIDE. | |
11994 | * Note that VECLEN+VECSTRIDE are RES0 for M-profile. | |
11995 | */ | |
11996 | if (arm_feature(env, ARM_FEATURE_XSCALE)) { | |
11997 | DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar); | |
11998 | } else { | |
11999 | DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len); | |
12000 | DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride); | |
12001 | } | |
12002 | if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) { | |
12003 | DP_TBFLAG_A32(flags, VFPEN, 1); | |
12004 | } | |
12005 | } | |
12006 | ||
12007 | DP_TBFLAG_AM32(flags, THUMB, env->thumb); | |
12008 | DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits); | |
12009 | } | |
12010 | ||
12011 | /* | |
12012 | * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine | |
12013 | * states defined in the ARM ARM for software singlestep: | |
12014 | * SS_ACTIVE PSTATE.SS State | |
12015 | * 0 x Inactive (the TB flag for SS is always 0) | |
12016 | * 1 0 Active-pending | |
12017 | * 1 1 Active-not-pending | |
12018 | * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB. | |
12019 | */ | |
12020 | if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) { | |
12021 | DP_TBFLAG_ANY(flags, PSTATE__SS, 1); | |
12022 | } | |
12023 | ||
12024 | *pflags = flags.flags; | |
12025 | *cs_base = flags.flags2; | |
12026 | } | |
12027 | ||
12028 | #ifdef TARGET_AARCH64 | |
12029 | /* | |
12030 | * The manual says that when SVE is enabled and VQ is widened the | |
12031 | * implementation is allowed to zero the previously inaccessible | |
12032 | * portion of the registers. The corollary to that is that when | |
12033 | * SVE is enabled and VQ is narrowed we are also allowed to zero | |
12034 | * the now inaccessible portion of the registers. | |
12035 | * | |
12036 | * The intent of this is that no predicate bit beyond VQ is ever set. | |
12037 | * Which means that some operations on predicate registers themselves | |
12038 | * may operate on full uint64_t or even unrolled across the maximum | |
12039 | * uint64_t[4]. Performing 4 bits of host arithmetic unconditionally | |
12040 | * may well be cheaper than conditionals to restrict the operation | |
12041 | * to the relevant portion of a uint16_t[16]. | |
12042 | */ | |
12043 | void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) | |
12044 | { | |
12045 | int i, j; | |
12046 | uint64_t pmask; | |
12047 | ||
12048 | assert(vq >= 1 && vq <= ARM_MAX_VQ); | |
12049 | assert(vq <= env_archcpu(env)->sve_max_vq); | |
12050 | ||
12051 | /* Zap the high bits of the zregs. */ | |
12052 | for (i = 0; i < 32; i++) { | |
12053 | memset(&env->vfp.zregs[i].d[2 * vq], 0, 16 * (ARM_MAX_VQ - vq)); | |
12054 | } | |
12055 | ||
12056 | /* Zap the high bits of the pregs and ffr. */ | |
12057 | pmask = 0; | |
12058 | if (vq & 3) { | |
12059 | pmask = ~(-1ULL << (16 * (vq & 3))); | |
12060 | } | |
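    /*
     * Each uint64_t of a predicate register covers four 128-bit vector
     * granules (16 predicate bits per granule).  When vq is not a
     * multiple of 4 the boundary word is only partially valid: e.g. for
     * vq == 5, pmask == 0xffff keeps just the bits for the fifth
     * granule, and all later words are cleared entirely.
     */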
12061 | for (j = vq / 4; j < ARM_MAX_VQ / 4; j++) { | |
12062 | for (i = 0; i < 17; ++i) { | |
12063 | env->vfp.pregs[i].p[j] &= pmask; | |
12064 | } | |
12065 | pmask = 0; | |
12066 | } | |
12067 | } | |
12068 | ||
12069 | static uint32_t sve_vqm1_for_el_sm_ena(CPUARMState *env, int el, bool sm) | |
12070 | { | |
12071 | int exc_el; | |
12072 | ||
12073 | if (sm) { | |
12074 | exc_el = sme_exception_el(env, el); | |
12075 | } else { | |
12076 | exc_el = sve_exception_el(env, el); | |
12077 | } | |
12078 | if (exc_el) { | |
12079 | return 0; /* disabled */ | |
12080 | } | |
12081 | return sve_vqm1_for_el_sm(env, el, sm); | |
12082 | } | |
12083 | ||
12084 | /* | |
12085 | * Notice a change in SVE vector size when changing EL. | |
12086 | */ | |
12087 | void aarch64_sve_change_el(CPUARMState *env, int old_el, | |
12088 | int new_el, bool el0_a64) | |
12089 | { | |
12090 | ARMCPU *cpu = env_archcpu(env); | |
12091 | int old_len, new_len; | |
12092 | bool old_a64, new_a64, sm; | |
12093 | ||
12094 | /* Nothing to do if no SVE. */ | |
12095 | if (!cpu_isar_feature(aa64_sve, cpu)) { | |
12096 | return; | |
12097 | } | |
12098 | ||
12099 | /* Nothing to do if FP is disabled in either EL. */ | |
12100 | if (fp_exception_el(env, old_el) || fp_exception_el(env, new_el)) { | |
12101 | return; | |
12102 | } | |
12103 | ||
12104 | old_a64 = old_el ? arm_el_is_aa64(env, old_el) : el0_a64; | |
12105 | new_a64 = new_el ? arm_el_is_aa64(env, new_el) : el0_a64; | |
12106 | ||
12107 | /* | |
12108 | * Both AArch64.TakeException and AArch64.ExceptionReturn | |
12109 | * invoke ResetSVEState when taking an exception from, or | |
12110 | * returning to, AArch32 state when PSTATE.SM is enabled. | |
12111 | */ | |
12112 | sm = FIELD_EX64(env->svcr, SVCR, SM); | |
12113 | if (old_a64 != new_a64 && sm) { | |
12114 | arm_reset_sve_state(env); | |
12115 | return; | |
12116 | } | |
12117 | ||
12118 | /* | |
12119 | * DDI0584A.d sec 3.2: "If SVE instructions are disabled or trapped | |
12120 | * at ELx, or not available because the EL is in AArch32 state, then | |
12121 | * for all purposes other than a direct read, the ZCR_ELx.LEN field | |
12122 | * has an effective value of 0". | |
12123 | * | |
12124 | * Consider EL2 (aa64, vq=4) -> EL0 (aa32) -> EL1 (aa64, vq=0). | |
12125 | * If we ignore aa32 state, we would fail to see the vq4->vq0 transition | |
12126 | * from EL2->EL1. Thus we go ahead and narrow when entering aa32 so that | |
12127 | * we already have the correct register contents when encountering the | |
12128 | * vq0->vq0 transition between EL0->EL1. | |
12129 | */ | |
12130 | old_len = new_len = 0; | |
12131 | if (old_a64) { | |
12132 | old_len = sve_vqm1_for_el_sm_ena(env, old_el, sm); | |
12133 | } | |
12134 | if (new_a64) { | |
12135 | new_len = sve_vqm1_for_el_sm_ena(env, new_el, sm); | |
12136 | } | |
12137 | ||
12138 | /* When changing vector length, clear inaccessible state. */ | |
12139 | if (new_len < old_len) { | |
12140 | aarch64_sve_narrow_vq(env, new_len + 1); | |
12141 | } | |
12142 | } | |
12143 | #endif | |
12144 | ||
12145 | #ifndef CONFIG_USER_ONLY | |
12146 | ARMSecuritySpace arm_security_space(CPUARMState *env) | |
12147 | { | |
12148 | if (arm_feature(env, ARM_FEATURE_M)) { | |
12149 | return arm_secure_to_space(env->v7m.secure); | |
12150 | } | |
12151 | ||
12152 | /* | |
12153 | * If EL3 is not supported then the secure state is implementation | |
12154 | * defined, in which case QEMU defaults to non-secure. | |
12155 | */ | |
12156 | if (!arm_feature(env, ARM_FEATURE_EL3)) { | |
12157 | return ARMSS_NonSecure; | |
12158 | } | |
12159 | ||
12160 | /* Check for AArch64 EL3 or AArch32 Mon. */ | |
12161 | if (is_a64(env)) { | |
12162 | if (extract32(env->pstate, 2, 2) == 3) { | |
12163 | if (cpu_isar_feature(aa64_rme, env_archcpu(env))) { | |
12164 | return ARMSS_Root; | |
12165 | } else { | |
12166 | return ARMSS_Secure; | |
12167 | } | |
12168 | } | |
12169 | } else { | |
12170 | if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) { | |
12171 | return ARMSS_Secure; | |
12172 | } | |
12173 | } | |
12174 | ||
12175 | return arm_security_space_below_el3(env); | |
12176 | } | |
12177 | ||
12178 | ARMSecuritySpace arm_security_space_below_el3(CPUARMState *env) | |
12179 | { | |
12180 | assert(!arm_feature(env, ARM_FEATURE_M)); | |
12181 | ||
12182 | /* | |
12183 | * If EL3 is not supported then the secure state is implementation | |
12184 | * defined, in which case QEMU defaults to non-secure. | |
12185 | */ | |
12186 | if (!arm_feature(env, ARM_FEATURE_EL3)) { | |
12187 | return ARMSS_NonSecure; | |
12188 | } | |
12189 | ||
12190 | /* | |
12191 | * Note NSE cannot be set without RME, and NSE & !NS is Reserved. | |
12192 | * Ignoring NSE when !NS retains consistency without having to | |
12193 | * modify other predicates. | |
12194 | */ | |
12195 | if (!(env->cp15.scr_el3 & SCR_NS)) { | |
12196 | return ARMSS_Secure; | |
12197 | } else if (env->cp15.scr_el3 & SCR_NSE) { | |
12198 | return ARMSS_Realm; | |
12199 | } else { | |
12200 | return ARMSS_NonSecure; | |
12201 | } | |
12202 | } | |
12203 | #endif /* !CONFIG_USER_ONLY */ |