/*
 * PA-RISC emulation cpu definitions for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HPPA_CPU_H
#define HPPA_CPU_H

#include "cpu-qom.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"

/* PA-RISC 1.x processors have a strong memory model.  */
/* ??? While we do not yet implement PA-RISC 2.0, those processors have
   a weak memory model, but with TLB bits that force ordering on a per-page
   basis.  It's probably easier to fall back to a strong memory model.  */
#define TCG_GUEST_DEFAULT_MO        TCG_MO_ALL

#define MMU_KERNEL_IDX   11
#define MMU_PL1_IDX      12
#define MMU_PL2_IDX      13
#define MMU_USER_IDX     14
#define MMU_PHYS_IDX     15

#define PRIV_TO_MMU_IDX(priv)    (MMU_KERNEL_IDX + (priv))
#define MMU_IDX_TO_PRIV(mmu_idx) ((mmu_idx) - MMU_KERNEL_IDX)

#define TARGET_INSN_START_EXTRA_WORDS 1

/* No need to flush MMU_PHYS_IDX */
#define HPPA_MMU_FLUSH_MASK                             \
        (1 << MMU_KERNEL_IDX | 1 << MMU_PL1_IDX |       \
         1 << MMU_PL2_IDX    | 1 << MMU_USER_IDX)
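
/*
 * Illustration only, not part of this header's API: the mask above is
 * meant to be fed to QEMU's tlb_flush_by_mmuidx(), flushing every virtual
 * MMU index while leaving MMU_PHYS_IDX alone.  A minimal sketch, assuming
 * "exec/exec-all.h" is in scope for the prototype; the helper name below
 * is hypothetical.
 */
static inline void hppa_flush_virtual_tlbs(CPUState *cs)
{
    tlb_flush_by_mmuidx(cs, HPPA_MMU_FLUSH_MASK);
}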

/* Hardware exceptions, interrupts, faults, and traps.  */
#define EXCP_HPMC                1  /* high priority machine check */
#define EXCP_POWER_FAIL          2
#define EXCP_RC                  3  /* recovery counter */
#define EXCP_EXT_INTERRUPT       4  /* external interrupt */
#define EXCP_LPMC                5  /* low priority machine check */
#define EXCP_ITLB_MISS           6  /* itlb miss / instruction page fault */
#define EXCP_IMP                 7  /* instruction memory protection trap */
#define EXCP_ILL                 8  /* illegal instruction trap */
#define EXCP_BREAK               9  /* break instruction */
#define EXCP_PRIV_OPR           10  /* privileged operation trap */
#define EXCP_PRIV_REG           11  /* privileged register trap */
#define EXCP_OVERFLOW           12  /* signed overflow trap */
#define EXCP_COND               13  /* trap-on-condition */
#define EXCP_ASSIST             14  /* assist exception trap */
#define EXCP_DTLB_MISS          15  /* dtlb miss / data page fault */
#define EXCP_NA_ITLB_MISS       16  /* non-access itlb miss */
#define EXCP_NA_DTLB_MISS       17  /* non-access dtlb miss */
#define EXCP_DMP                18  /* data memory protection trap */
#define EXCP_DMB                19  /* data memory break trap */
#define EXCP_TLB_DIRTY          20  /* tlb dirty bit trap */
#define EXCP_PAGE_REF           21  /* page reference trap */
#define EXCP_ASSIST_EMU         22  /* assist emulation trap */
#define EXCP_HPT                23  /* high-privilege transfer trap */
#define EXCP_LPT                24  /* low-privilege transfer trap */
#define EXCP_TB                 25  /* taken branch trap */
#define EXCP_DMAR               26  /* data memory access rights trap */
#define EXCP_DMPI               27  /* data memory protection id trap */
#define EXCP_UNALIGN            28  /* unaligned data reference trap */
#define EXCP_PER_INTERRUPT      29  /* performance monitor interrupt */

/* Exceptions for linux-user emulation.  */
#define EXCP_SYSCALL            30
#define EXCP_SYSCALL_LWS        31

/* Emulated hardware TOC button */
#define EXCP_TOC                32  /* TOC = transfer of control (NMI) */

#define CPU_INTERRUPT_NMI       CPU_INTERRUPT_TGT_EXT_3  /* TOC */

/* Taken from Linux kernel: arch/parisc/include/asm/psw.h */
#define PSW_I            0x00000001
#define PSW_D            0x00000002
#define PSW_P            0x00000004
#define PSW_Q            0x00000008
#define PSW_R            0x00000010
#define PSW_F            0x00000020
#define PSW_G            0x00000040  /* PA1.x only */
#define PSW_O            0x00000080  /* PA2.0 only */
#define PSW_CB           0x0000ff00
#define PSW_M            0x00010000
#define PSW_V            0x00020000
#define PSW_C            0x00040000
#define PSW_B            0x00080000
#define PSW_X            0x00100000
#define PSW_N            0x00200000
#define PSW_L            0x00400000
#define PSW_H            0x00800000
#define PSW_T            0x01000000
#define PSW_S            0x02000000
#define PSW_E            0x04000000
#ifdef TARGET_HPPA64
#define PSW_W            0x08000000  /* PA2.0 only */
#else
#define PSW_W            0
#endif
#define PSW_Z            0x40000000  /* PA1.x only */
#define PSW_Y            0x80000000  /* PA1.x only */

#define PSW_SM           (PSW_W | PSW_E | PSW_O | PSW_G | PSW_F \
                          | PSW_R | PSW_Q | PSW_P | PSW_D | PSW_I)

/* The ssm/rsm instructions number PSW_W and PSW_E differently.  */
#define PSW_SM_I         PSW_I      /* Enable External Interrupts */
#define PSW_SM_D         PSW_D
#define PSW_SM_P         PSW_P
#define PSW_SM_Q         PSW_Q      /* Enable Interrupt State Collection */
#define PSW_SM_R         PSW_R      /* Enable Recovery Counter Trap */
#ifdef TARGET_HPPA64
#define PSW_SM_E         0x100
#define PSW_SM_W         0x200      /* PA2.0 only: Enable Wide Mode */
#else
#define PSW_SM_E         0
#define PSW_SM_W         0
#endif
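
/*
 * Illustration only, not a helper QEMU actually defines: translating an
 * ssm/rsm operand into the PSW layout above.  Only the E and W bits sit
 * at different positions in the two encodings; everything else carries
 * over unchanged.  uint32_t is used here because target_ureg is not yet
 * defined at this point in the header.
 */
static inline uint32_t psw_bits_from_sm(uint32_t sm)
{
    /* I, D, P, Q and R are numbered identically in both encodings.  */
    uint32_t psw = sm & (PSW_SM_I | PSW_SM_D | PSW_SM_P
                         | PSW_SM_Q | PSW_SM_R);

    /* E and W must be moved to their PSW positions.  */
    if (sm & PSW_SM_E) {
        psw |= PSW_E;
    }
    if (sm & PSW_SM_W) {
        psw |= PSW_W;
    }
    return psw;
}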

#define CR_RC            0
#define CR_PID1          8
#define CR_PID2          9
#define CR_PID3          12
#define CR_PID4          13
#define CR_SCRCCR        10
#define CR_SAR           11
#define CR_IVA           14
#define CR_EIEM          15
#define CR_IT            16
#define CR_IIASQ         17
#define CR_IIAOQ         18
#define CR_IIR           19
#define CR_ISR           20
#define CR_IOR           21
#define CR_IPSW          22
#define CR_EIRR          23

#if TARGET_REGISTER_BITS == 32
typedef uint32_t target_ureg;
typedef int32_t  target_sreg;
#define TREG_FMT_lx  "%08"PRIx32
#define TREG_FMT_ld  "%"PRId32
#else
typedef uint64_t target_ureg;
typedef int64_t  target_sreg;
#define TREG_FMT_lx  "%016"PRIx64
#define TREG_FMT_ld  "%"PRId64
#endif

typedef struct {
    uint64_t va_b;              /* first byte of the virtual range */
    uint64_t va_e;              /* last byte of the virtual range */
    target_ureg pa;             /* physical address */
    /* The next four fields mirror the architectural U, T, D and B page flags. */
    unsigned u : 1;
    unsigned t : 1;
    unsigned d : 1;
    unsigned b : 1;
    unsigned page_size : 4;
    unsigned ar_type : 3;       /* access rights: type */
    unsigned ar_pl1 : 2;        /* access rights: privilege level 1 */
    unsigned ar_pl2 : 2;        /* access rights: privilege level 2 */
    unsigned entry_valid : 1;
    unsigned access_id : 16;    /* protection id */
} hppa_tlb_entry;
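
/*
 * Illustration only, and not necessarily how the softmmu resolves
 * translations: with entries of this shape, a straightforward lookup is
 * a linear scan matching an address against the inclusive [va_b, va_e]
 * range of each valid entry.  The helper name below is hypothetical.
 */
static inline hppa_tlb_entry *hppa_find_tlb_sketch(hppa_tlb_entry *tlb,
                                                   unsigned n, uint64_t addr)
{
    unsigned i;

    for (i = 0; i < n; i++) {
        if (tlb[i].entry_valid && addr >= tlb[i].va_b && addr <= tlb[i].va_e) {
            return &tlb[i];
        }
    }
    return NULL;  /* no translation: would raise an ITLB/DTLB miss */
}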

typedef struct CPUArchState {
    target_ureg iaoq_f;         /* front */
    target_ureg iaoq_b;         /* back, aka next instruction */

    target_ureg gr[32];
    uint64_t fr[32];
    uint64_t sr[8];             /* stored shifted into place for gva */

    target_ureg psw;            /* All psw bits except the following:  */
    target_ureg psw_n;          /* boolean */
    target_sreg psw_v;          /* in most significant bit */

    /* Splitting the carry-borrow field into the MSB and "the rest" allows
     * "the rest" to be deleted when it is unused while the MSB remains live.
     * In addition, it's easier to compute carry-in for bit B+1 than it is
     * to compute carry-out for bit B (3 vs 4 insns for addition, assuming
     * the host has the appropriate add-with-carry insn to compute the msb).
     * Therefore the carry bits are stored as: cb_msb : cb & 0x11111110.
     * (See the illustrative helper just after this structure.)
     */
    target_ureg psw_cb;         /* in least significant bit of next nibble */
    target_ureg psw_cb_msb;     /* boolean */

    uint64_t iasq_f;
    uint64_t iasq_b;

    uint32_t fr0_shadow;        /* flags, c, ca/cq, rm, d, enables */
    float_status fp_status;

    target_ureg cr[32];         /* control registers */
    target_ureg cr_back[2];     /* back of cr17/cr18 */
    target_ureg shadow[7];      /* shadow registers */

    /* ??? The number of entries isn't specified by the architecture.  */
#ifdef TARGET_HPPA64
#define HPPA_BTLB_FIXED     0   /* BTLBs are not supported in 64-bit machines */
#else
#define HPPA_BTLB_FIXED     16
#endif
#define HPPA_BTLB_VARIABLE  0
#define HPPA_TLB_ENTRIES    256
#define HPPA_BTLB_ENTRIES   (HPPA_BTLB_FIXED + HPPA_BTLB_VARIABLE)

    /* ??? Implement a unified itlb/dtlb for the moment.  */
    /* ??? We should use a more intelligent data structure.  */
    hppa_tlb_entry tlb[HPPA_TLB_ENTRIES];
    uint32_t tlb_last;
} CPUHPPAState;
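
/*
 * Illustration only, not the helper QEMU uses (cpu_hppa_get_psw, declared
 * below, does this for real): reconstructing the architectural PSW[CB]
 * byte from the split psw_cb/psw_cb_msb storage described above, assuming
 * TARGET_REGISTER_BITS == 32 for brevity.  Bit 4n+4 of psw_cb holds the
 * carry out of nibble n; psw_cb_msb holds the carry out of the top nibble.
 */
static inline uint32_t hppa_cb_byte_sketch(const CPUHPPAState *env)
{
    uint32_t cb = env->psw_cb_msb & 1;  /* carry out of nibble 7 */
    int i;

    for (i = 6; i >= 0; i--) {
        /* Carry-in to bit 4n+4 equals carry-out of nibble n.  */
        cb = (cb << 1) | ((env->psw_cb >> (4 * i + 4)) & 1);
    }
    return cb << 8;  /* position within PSW_CB (0x0000ff00) */
}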

/**
 * HPPACPU:
 * @env: #CPUHPPAState
 *
 * An HPPA CPU.
 */
struct ArchCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUNegativeOffsetState neg;
    CPUHPPAState env;
    QEMUTimer *alarm_timer;
};

#include "exec/cpu-all.h"

static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    /* The low two bits of IAOQ_F encode the current privilege level.  */
    if (env->psw & (ifetch ? PSW_C : PSW_D)) {
        return PRIV_TO_MMU_IDX(env->iaoq_f & 3);
    }
    return MMU_PHYS_IDX;  /* mmu disabled */
#endif
}

void hppa_translate_init(void);

#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU

static inline target_ulong hppa_form_gva_psw(target_ureg psw, uint64_t spc,
                                             target_ureg off)
{
#ifdef CONFIG_USER_ONLY
    return off;
#else
    /* In wide (PSW_W) mode the offset keeps 62 bits, else the low 32 bits.  */
    off &= (psw & PSW_W ? 0x3fffffffffffffffull : 0xffffffffull);
    return spc | off;
#endif
}

static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
                                         target_ureg off)
{
    return hppa_form_gva_psw(env->psw, spc, off);
}

/*
 * Since PSW_{I,CB} will never need to be in tb->flags, reuse them.
 * TB_FLAG_SR_SAME indicates that SR4 through SR7 all contain the
 * same value.
 */
#define TB_FLAG_SR_SAME     PSW_I
#define TB_FLAG_PRIV_SHIFT  8
#define TB_FLAG_UNALIGN     0x400

static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
                                        uint64_t *cs_base, uint32_t *pflags)
{
    uint32_t flags = env->psw_n * PSW_N;

    /* TB lookup assumes that PC contains the complete virtual address.
       If we leave space+offset separate, we'll get ITLB misses to an
       incomplete virtual address.  This also means that we must separate
       out the current cpu privilege from the low bits of IAOQ_F.  */
#ifdef CONFIG_USER_ONLY
    *pc = env->iaoq_f & -4;
    *cs_base = env->iaoq_b & -4;
    flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
#else
    /* ??? E, T, H, L, B and P bits need to be here, when implemented.  */
    flags |= env->psw & (PSW_W | PSW_C | PSW_D);
    flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;

    *pc = (env->psw & PSW_C
           ? hppa_form_gva_psw(env->psw, env->iasq_f, env->iaoq_f & -4)
           : env->iaoq_f & -4);
    *cs_base = env->iasq_f;

    /* Insert the difference between IAOQ_B and IAOQ_F within the otherwise
       zero low 32 bits of CS_BASE.  This will succeed for all direct
       branches, which is the primary case we care about -- using goto_tb
       within a page.  Failure is indicated by a zero difference.  */
    if (env->iasq_f == env->iasq_b) {
        target_sreg diff = env->iaoq_b - env->iaoq_f;
        if (TARGET_REGISTER_BITS == 32 || diff == (int32_t)diff) {
            *cs_base |= (uint32_t)diff;
        }
    }
    if ((env->sr[4] == env->sr[5])
        & (env->sr[4] == env->sr[6])
        & (env->sr[4] == env->sr[7])) {
        flags |= TB_FLAG_SR_SAME;
    }
#endif

    *pflags = flags;
}

target_ureg cpu_hppa_get_psw(CPUHPPAState *env);
void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg psw);
void cpu_hppa_loaded_fr0(CPUHPPAState *env);

#ifdef CONFIG_USER_ONLY
static inline void cpu_hppa_change_prot_id(CPUHPPAState *env) { }
#else
void cpu_hppa_change_prot_id(CPUHPPAState *env);
#endif

int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags);
#ifndef CONFIG_USER_ONLY
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr);
void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              hppa_tlb_entry **tlb_entry);
extern const MemoryRegionOps hppa_io_eir_ops;
extern const VMStateDescription vmstate_hppa_cpu;
void hppa_cpu_alarm_timer(void *opaque);
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr);
#endif
G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra);

#endif /* HPPA_CPU_H */