]> git.proxmox.com Git - mirror_qemu.git/blame - target/hppa/cpu.h
target/hppa: Remove TARGET_HPPA64
[mirror_qemu.git] / target / hppa / cpu.h
CommitLineData
61766fe9
RH
/*
 * PA-RISC emulation cpu definitions for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HPPA_CPU_H
#define HPPA_CPU_H

#include "cpu-qom.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"
#include "qemu/interval-tree.h"
7b93dab5
RH
/* PA-RISC 1.x processors have a strong memory model.  */
/* ??? While we do not yet implement PA-RISC 2.0, those processors have
   a weak memory model, but with TLB bits that force ordering on a per-page
   basis.  It's probably easier to fall back to a strong memory model.  */
#define TCG_GUEST_DEFAULT_MO        TCG_MO_ALL

/*
 * MMU indexes: one pair per privilege level (0 = most privileged),
 * with and without the PSW_P protection-id check enabled, plus a
 * final index for untranslated (physical) accesses.  The conversion
 * macros below rely on this exact consecutive layout.
 */
#define MMU_KERNEL_IDX    7
#define MMU_KERNEL_P_IDX  8
#define MMU_PL1_IDX       9
#define MMU_PL1_P_IDX     10
#define MMU_PL2_IDX       11
#define MMU_PL2_P_IDX     12
#define MMU_USER_IDX      13
#define MMU_USER_P_IDX    14
#define MMU_PHYS_IDX      15

/* Recover the privilege level (0..3) from an MMU index. */
#define MMU_IDX_TO_PRIV(MIDX)       (((MIDX) - MMU_KERNEL_IDX) / 2)
/* Recover the protection-id-check flag from an MMU index. */
#define MMU_IDX_TO_P(MIDX)          (((MIDX) - MMU_KERNEL_IDX) & 1)
/* Build an MMU index from a privilege level and the PSW_P flag. */
#define PRIV_P_TO_MMU_IDX(PRIV, P)  ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)
#define TARGET_INSN_START_EXTRA_WORDS 1

/* No need to flush MMU_PHYS_IDX: physical accesses are never translated. */
#define HPPA_MMU_FLUSH_MASK                             \
        (1 << MMU_KERNEL_IDX | 1 << MMU_KERNEL_P_IDX |  \
         1 << MMU_PL1_IDX    | 1 << MMU_PL1_P_IDX    |  \
         1 << MMU_PL2_IDX    | 1 << MMU_PL2_P_IDX    |  \
         1 << MMU_USER_IDX   | 1 << MMU_USER_P_IDX)

/* Indices to flush for access_id changes: only the protection-checked
   (P) variants depend on the access ids. */
#define HPPA_MMU_FLUSH_P_MASK                           \
        (1 << MMU_KERNEL_P_IDX | 1 << MMU_PL1_P_IDX |   \
         1 << MMU_PL2_P_IDX    | 1 << MMU_USER_P_IDX)
/* Hardware exceptions, interrupts, faults, and traps. */
#define EXCP_HPMC             1  /* high priority machine check */
#define EXCP_POWER_FAIL       2
#define EXCP_RC               3  /* recovery counter */
#define EXCP_EXT_INTERRUPT    4  /* external interrupt */
#define EXCP_LPMC             5  /* low priority machine check */
#define EXCP_ITLB_MISS        6  /* itlb miss / instruction page fault */
#define EXCP_IMP              7  /* instruction memory protection trap */
#define EXCP_ILL              8  /* illegal instruction trap */
#define EXCP_BREAK            9  /* break instruction */
#define EXCP_PRIV_OPR        10  /* privileged operation trap */
#define EXCP_PRIV_REG        11  /* privileged register trap */
#define EXCP_OVERFLOW        12  /* signed overflow trap */
#define EXCP_COND            13  /* trap-on-condition */
#define EXCP_ASSIST          14  /* assist exception trap */
#define EXCP_DTLB_MISS       15  /* dtlb miss / data page fault */
#define EXCP_NA_ITLB_MISS    16  /* non-access itlb miss */
#define EXCP_NA_DTLB_MISS    17  /* non-access dtlb miss */
#define EXCP_DMP             18  /* data memory protection trap */
#define EXCP_DMB             19  /* data memory break trap */
#define EXCP_TLB_DIRTY       20  /* tlb dirty bit trap */
#define EXCP_PAGE_REF        21  /* page reference trap */
#define EXCP_ASSIST_EMU      22  /* assist emulation trap */
#define EXCP_HPT             23  /* high-privilege transfer trap */
#define EXCP_LPT             24  /* low-privilege transfer trap */
#define EXCP_TB              25  /* taken branch trap */
#define EXCP_DMAR            26  /* data memory access rights trap */
#define EXCP_DMPI            27  /* data memory protection id trap */
#define EXCP_UNALIGN         28  /* unaligned data reference trap */
#define EXCP_PER_INTERRUPT   29  /* performance monitor interrupt */

/* Exceptions for linux-user emulation. */
#define EXCP_SYSCALL         30
#define EXCP_SYSCALL_LWS     31

/* Emulated hardware TOC button */
#define EXCP_TOC             32  /* TOC = Transfer of control (NMI) */

#define CPU_INTERRUPT_NMI    CPU_INTERRUPT_TGT_EXT_3  /* TOC */
/* Processor Status Word bits.
   Taken from Linux kernel: arch/parisc/include/asm/psw.h */
#define PSW_I            0x00000001
#define PSW_D            0x00000002
#define PSW_P            0x00000004
#define PSW_Q            0x00000008
#define PSW_R            0x00000010
#define PSW_F            0x00000020
#define PSW_G            0x00000040  /* PA1.x only */
#define PSW_O            0x00000080  /* PA2.0 only */
#define PSW_CB           0x0000ff00
#define PSW_M            0x00010000
#define PSW_V            0x00020000
#define PSW_C            0x00040000
#define PSW_B            0x00080000
#define PSW_X            0x00100000
#define PSW_N            0x00200000
#define PSW_L            0x00400000
#define PSW_H            0x00800000
#define PSW_T            0x01000000
#define PSW_S            0x02000000
#define PSW_E            0x04000000
#define PSW_W            0x08000000  /* PA2.0 only */
#define PSW_Z            0x40000000  /* PA1.x only */
#define PSW_Y            0x80000000  /* PA1.x only */

/* The bits that may be set/reset by the system-mask instructions. */
#define PSW_SM           (PSW_W | PSW_E | PSW_O | PSW_G | PSW_F \
                          | PSW_R | PSW_Q | PSW_P | PSW_D | PSW_I)

/* ssm/rsm instructions number PSW_W and PSW_E differently */
#define PSW_SM_I         PSW_I  /* Enable External Interrupts */
#define PSW_SM_D         PSW_D
#define PSW_SM_P         PSW_P
#define PSW_SM_Q         PSW_Q  /* Enable Interrupt State Collection */
#define PSW_SM_R         PSW_R  /* Enable Recover Counter Trap */
#define PSW_SM_E         0x100
#define PSW_SM_W         0x200  /* PA2.0 only : Enable Wide Mode */
/* Control registers, by architected control-register number.
   CR_PID1..CR_PID4 hold the protection ids checked against TLB
   access_id; note they are not numbered consecutively. */
#define CR_RC            0
#define CR_PID1          8
#define CR_PID2          9
#define CR_PID3          12
#define CR_PID4          13
#define CR_SCRCCR        10
#define CR_SAR           11
#define CR_IVA           14
#define CR_EIEM          15
#define CR_IT            16
#define CR_IIASQ         17
#define CR_IIAOQ         18
#define CR_IIR           19
#define CR_ISR           20
#define CR_IOR           21
#define CR_IPSW          22
#define CR_EIRR          23
/*
 * target_ureg/target_sreg are the natural (un)signed register width of
 * the emulated cpu: 32-bit for PA1.x, 64-bit otherwise.  TREG_FMT_* are
 * the matching printf format strings.
 */
#if TARGET_REGISTER_BITS == 32
typedef uint32_t target_ureg;
typedef int32_t  target_sreg;
#define TREG_FMT_lx   "%08"PRIx32
#define TREG_FMT_ld   "%"PRId32
#else
typedef uint64_t target_ureg;
typedef int64_t  target_sreg;
#define TREG_FMT_lx   "%016"PRIx64
#define TREG_FMT_ld   "%"PRId64
#endif
/* One software TLB entry. */
typedef struct HPPATLBEntry {
    union {
        /* While valid, the entry is keyed by virtual address range
           in the per-cpu interval tree (tlb_root). */
        IntervalTreeNode itree;
        /* While invalid, the entry is chained on the free list
           (tlb_unused). */
        struct HPPATLBEntry *unused_next;
    };

    target_ureg pa;             /* physical page address */

    unsigned entry_valid : 1;

    /* Architected per-page attribute/trap bits — TODO confirm exact
       semantics against the PA-RISC architecture manual. */
    unsigned u : 1;
    unsigned t : 1;
    unsigned d : 1;
    unsigned b : 1;

    /* Access rights: type plus the two privilege-level fields. */
    unsigned ar_type : 3;
    unsigned ar_pl1 : 2;
    unsigned ar_pl2 : 2;
    unsigned access_id : 16;    /* matched against CR_PID1..CR_PID4 */
} HPPATLBEntry;
/* Architectural cpu state for one HPPA cpu. */
typedef struct CPUArchState {
    target_ureg iaoq_f;      /* front */
    target_ureg iaoq_b;      /* back, aka next instruction */

    target_ureg gr[32];      /* general registers */
    uint64_t fr[32];         /* floating-point registers */
    uint64_t sr[8];          /* stored shifted into place for gva */

    target_ureg psw;         /* All psw bits except the following:  */
    target_ureg psw_n;       /* boolean */
    target_sreg psw_v;       /* in most significant bit */

    /* Splitting the carry-borrow field into the MSB and "the rest", allows
     * for "the rest" to be deleted when it is unused, but the MSB is in use.
     * In addition, it's easier to compute carry-in for bit B+1 than it is to
     * compute carry-out for bit B (3 vs 4 insns for addition, assuming the
     * host has the appropriate add-with-carry insn to compute the msb).
     * Therefore the carry bits are stored as: cb_msb : cb & 0x11111110.
     */
    target_ureg psw_cb;      /* in least significant bit of next nibble */
    target_ureg psw_cb_msb;  /* boolean */

    /* Front/back instruction address space, paired with iaoq_f/iaoq_b. */
    uint64_t iasq_f;
    uint64_t iasq_b;

    uint32_t fr0_shadow;     /* flags, c, ca/cq, rm, d, enables */
    float_status fp_status;

    target_ureg cr[32];      /* control registers */
    target_ureg cr_back[2];  /* back of cr17/cr18 */
    target_ureg shadow[7];   /* shadow registers */

    /*
     * ??? The number of entries isn't specified by the architecture.
     * BTLBs are not supported in 64-bit machines.
     */
#define PA10_BTLB_FIXED     16
#define PA10_BTLB_VARIABLE  0
#define HPPA_TLB_ENTRIES    256

    /* Index for round-robin tlb eviction. */
    uint32_t tlb_last;

    /*
     * For pa1.x, the partial initialized, still invalid tlb entry
     * which has had ITLBA performed, but not yet ITLBP.
     */
    HPPATLBEntry *tlb_partial;

    /* Linked list of all invalid (unused) tlb entries. */
    HPPATLBEntry *tlb_unused;

    /* Root of the search tree for all valid tlb entries. */
    IntervalTreeRoot tlb_root;

    HPPATLBEntry tlb[HPPA_TLB_ENTRIES];
} CPUHPPAState;
61766fe9
RH
246
/**
 * HPPACPU:
 * @env: #CPUHPPAState
 *
 * An HPPA CPU.
 */
struct ArchCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUHPPAState env;
    QEMUTimer *alarm_timer;  /* see hppa_cpu_alarm_timer() */
};
61766fe9
RH
262#include "exec/cpu-all.h"
263
bd6243a3
RH
264static inline bool hppa_is_pa20(CPUHPPAState *env)
265{
266 return object_dynamic_cast(OBJECT(env_cpu(env)), TYPE_HPPA64_CPU) != NULL;
267}
268
9cf2112b
RH
269static inline int HPPA_BTLB_ENTRIES(CPUHPPAState *env)
270{
271 return hppa_is_pa20(env) ? 0 : PA10_BTLB_FIXED + PA10_BTLB_VARIABLE;
272}
273
61766fe9
RH
274static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
275{
3d68ee7b
RH
276#ifdef CONFIG_USER_ONLY
277 return MMU_USER_IDX;
278#else
279 if (env->psw & (ifetch ? PSW_C : PSW_D)) {
bb67ec32 280 return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P);
3d68ee7b
RH
281 }
282 return MMU_PHYS_IDX; /* mmu disabled */
283#endif
61766fe9
RH
284}
285
286void hppa_translate_init(void);
287
0dacec87 288#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
61766fe9 289
c301f34e
RH
290static inline target_ulong hppa_form_gva_psw(target_ureg psw, uint64_t spc,
291 target_ureg off)
292{
293#ifdef CONFIG_USER_ONLY
294 return off;
295#else
698240d1 296 off &= psw & PSW_W ? MAKE_64BIT_MASK(0, 62) : MAKE_64BIT_MASK(0, 32);
c301f34e
RH
297 return spc | off;
298#endif
299}
300
/* As hppa_form_gva_psw, using the live PSW for the width mode. */
static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
                                         target_ureg off)
{
    return hppa_form_gva_psw(env->psw, spc, off);
}
306
ccdf741c
RH
307hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr);
308hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr);
309
217d1a5e
RH
/*
 * Since PSW_{I,CB} will never need to be in tb->flags, reuse them.
 * TB_FLAG_SR_SAME indicates that SR4 through SR7 all contain the
 * same value.
 */
#define TB_FLAG_SR_SAME     PSW_I
#define TB_FLAG_PRIV_SHIFT  8
#define TB_FLAG_UNALIGN     0x400
c301f34e 318
bb5de525
AJ
/* Produce the pc / cs_base / flags triple that keys the TB cache. */
static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
                                        uint64_t *cs_base, uint32_t *pflags)
{
    uint32_t flags = env->psw_n * PSW_N;

    /* TB lookup assumes that PC contains the complete virtual address.
       If we leave space+offset separate, we'll get ITLB misses to an
       incomplete virtual address.  This also means that we must separate
       out current cpu privilege from the low bits of IAOQ_F.  */
#ifdef CONFIG_USER_ONLY
    *pc = env->iaoq_f & -4;
    *cs_base = env->iaoq_b & -4;
    flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
#else
    /* ??? E, T, H, L, B bits need to be here, when implemented.  */
    flags |= env->psw & (PSW_W | PSW_C | PSW_D | PSW_P);
    /* Privilege level from the low bits of IAOQ_F, placed above the
       PSW bits reused above. */
    flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;

    /* Space is only prepended when code translation is on (PSW_C). */
    *pc = hppa_form_gva_psw(env->psw, (env->psw & PSW_C ? env->iasq_f : 0),
                            env->iaoq_f & -4);
    *cs_base = env->iasq_f;

    /* Insert a difference between IAOQ_B and IAOQ_F within the otherwise zero
       low 32-bits of CS_BASE.  This will succeed for all direct branches,
       which is the primary case we care about -- using goto_tb within a page.
       Failure is indicated by a zero difference.  */
    if (env->iasq_f == env->iasq_b) {
        target_sreg diff = env->iaoq_b - env->iaoq_f;
        if (TARGET_REGISTER_BITS == 32 || diff == (int32_t)diff) {
            *cs_base |= (uint32_t)diff;
        }
    }
    /* Let the translator elide space-register lookups when all four
       general space registers agree. */
    if ((env->sr[4] == env->sr[5])
        & (env->sr[4] == env->sr[6])
        & (env->sr[4] == env->sr[7])) {
        flags |= TB_FLAG_SR_SAME;
    }
#endif

    *pflags = flags;
}
360
eaa3783b
RH
/* Assemble / install the architected PSW from / into the split
   psw, psw_n, psw_v, psw_cb* fields of env. */
target_ureg cpu_hppa_get_psw(CPUHPPAState *env);
void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg);
/* Called after fr[0] (fp status) is loaded; compare fr0_shadow. */
void cpu_hppa_loaded_fr0(CPUHPPAState *env);

/* React to a protection-id (CR_PIDn) change; a no-op for user-only,
   where protection ids are not checked. */
#ifdef CONFIG_USER_ONLY
static inline void cpu_hppa_change_prot_id(CPUHPPAState *env) { }
#else
void cpu_hppa_change_prot_id(CPUHPPAState *env);
#endif

/* gdbstub register accessors. */
int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
#ifndef CONFIG_USER_ONLY
/* Purge the entire software TLB. */
void hppa_ptlbe(CPUHPPAState *env);
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr);
void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry);
extern const MemoryRegionOps hppa_io_eir_ops;
extern const VMStateDescription vmstate_hppa_cpu;
void hppa_cpu_alarm_timer(void *);
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr);
#endif
G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra);
61766fe9 391
/* CPU_RESOLVING_TYPE is already defined earlier in this header; the
   duplicate (if benign) definition that lived here has been removed. */

#define cpu_list hppa_cpu_list
void hppa_cpu_list(void);
396
61766fe9 397#endif /* HPPA_CPU_H */