/*
 * PA-RISC emulation cpu definitions for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HPPA_CPU_H
#define HPPA_CPU_H

#include "cpu-qom.h"
#include "exec/cpu-defs.h"
#include "qemu/cpu-float.h"

/* PA-RISC 1.x processors have a strong memory model.  */
/* ??? While we do not yet implement PA-RISC 2.0, those processors have
   a weak memory model, but with TLB bits that force ordering on a per-page
   basis.  It's probably easier to fall back to a strong memory model.  */
#define TCG_GUEST_DEFAULT_MO        TCG_MO_ALL

#define MMU_KERNEL_IDX    7
#define MMU_KERNEL_P_IDX  8
#define MMU_PL1_IDX       9
#define MMU_PL1_P_IDX     10
#define MMU_PL2_IDX       11
#define MMU_PL2_P_IDX     12
#define MMU_USER_IDX      13
#define MMU_USER_P_IDX    14
#define MMU_PHYS_IDX      15

#define MMU_IDX_TO_PRIV(MIDX)       (((MIDX) - MMU_KERNEL_IDX) / 2)
#define MMU_IDX_TO_P(MIDX)          (((MIDX) - MMU_KERNEL_IDX) & 1)
#define PRIV_P_TO_MMU_IDX(PRIV, P)  ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)

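/*
 * For illustration, the values follow directly from the macros above:
 * PRIV_P_TO_MMU_IDX(0, 0) == MMU_KERNEL_IDX and, with protection-ID
 * checking enabled, PRIV_P_TO_MMU_IDX(0, 1) == MMU_KERNEL_P_IDX;
 * conversely MMU_IDX_TO_PRIV(MMU_USER_P_IDX) == 3 (user) and
 * MMU_IDX_TO_P(MMU_USER_P_IDX) == 1.
 */
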
#define TARGET_INSN_START_EXTRA_WORDS 1

/* No need to flush MMU_PHYS_IDX  */
#define HPPA_MMU_FLUSH_MASK                             \
        (1 << MMU_KERNEL_IDX | 1 << MMU_KERNEL_P_IDX |  \
         1 << MMU_PL1_IDX    | 1 << MMU_PL1_P_IDX    |  \
         1 << MMU_PL2_IDX    | 1 << MMU_PL2_P_IDX    |  \
         1 << MMU_USER_IDX   | 1 << MMU_USER_P_IDX)

/* Indices to flush for access_id changes. */
#define HPPA_MMU_FLUSH_P_MASK                           \
        (1 << MMU_KERNEL_P_IDX | 1 << MMU_PL1_P_IDX |   \
         1 << MMU_PL2_P_IDX    | 1 << MMU_USER_P_IDX)

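/*
 * Only the _P indices translate with protection-ID checking enabled
 * (see the PSW_P test in cpu_mmu_index below), so a change to the PID
 * control registers only needs to invalidate those four indices.
 */
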
/* Hardware exceptions, interrupts, faults, and traps. */
#define EXCP_HPMC                1  /* high priority machine check */
#define EXCP_POWER_FAIL          2
#define EXCP_RC                  3  /* recovery counter */
#define EXCP_EXT_INTERRUPT       4  /* external interrupt */
#define EXCP_LPMC                5  /* low priority machine check */
#define EXCP_ITLB_MISS           6  /* itlb miss / instruction page fault */
#define EXCP_IMP                 7  /* instruction memory protection trap */
#define EXCP_ILL                 8  /* illegal instruction trap */
#define EXCP_BREAK               9  /* break instruction */
#define EXCP_PRIV_OPR           10  /* privileged operation trap */
#define EXCP_PRIV_REG           11  /* privileged register trap */
#define EXCP_OVERFLOW           12  /* signed overflow trap */
#define EXCP_COND               13  /* trap-on-condition */
#define EXCP_ASSIST             14  /* assist exception trap */
#define EXCP_DTLB_MISS          15  /* dtlb miss / data page fault */
#define EXCP_NA_ITLB_MISS       16  /* non-access itlb miss */
#define EXCP_NA_DTLB_MISS       17  /* non-access dtlb miss */
#define EXCP_DMP                18  /* data memory protection trap */
#define EXCP_DMB                19  /* data memory break trap */
#define EXCP_TLB_DIRTY          20  /* tlb dirty bit trap */
#define EXCP_PAGE_REF           21  /* page reference trap */
#define EXCP_ASSIST_EMU         22  /* assist emulation trap */
#define EXCP_HPT                23  /* high-privilege transfer trap */
#define EXCP_LPT                24  /* low-privilege transfer trap */
#define EXCP_TB                 25  /* taken branch trap */
#define EXCP_DMAR               26  /* data memory access rights trap */
#define EXCP_DMPI               27  /* data memory protection id trap */
#define EXCP_UNALIGN            28  /* unaligned data reference trap */
#define EXCP_PER_INTERRUPT      29  /* performance monitor interrupt */

/* Exceptions for linux-user emulation. */
#define EXCP_SYSCALL            30
#define EXCP_SYSCALL_LWS        31

/* Emulated hardware TOC button */
#define EXCP_TOC                32  /* TOC = Transfer of control (NMI) */

#define CPU_INTERRUPT_NMI       CPU_INTERRUPT_TGT_EXT_3  /* TOC */

/* Taken from Linux kernel: arch/parisc/include/asm/psw.h */
#define PSW_I            0x00000001
#define PSW_D            0x00000002
#define PSW_P            0x00000004
#define PSW_Q            0x00000008
#define PSW_R            0x00000010
#define PSW_F            0x00000020
#define PSW_G            0x00000040  /* PA1.x only */
#define PSW_O            0x00000080  /* PA2.0 only */
#define PSW_CB           0x0000ff00
#define PSW_M            0x00010000
#define PSW_V            0x00020000
#define PSW_C            0x00040000
#define PSW_B            0x00080000
#define PSW_X            0x00100000
#define PSW_N            0x00200000
#define PSW_L            0x00400000
#define PSW_H            0x00800000
#define PSW_T            0x01000000
#define PSW_S            0x02000000
#define PSW_E            0x04000000
#ifdef TARGET_HPPA64
#define PSW_W            0x08000000  /* PA2.0 only */
#else
#define PSW_W            0
#endif
#define PSW_Z            0x40000000  /* PA1.x only */
#define PSW_Y            0x80000000  /* PA1.x only */

#define PSW_SM (PSW_W | PSW_E | PSW_O | PSW_G | PSW_F \
               | PSW_R | PSW_Q | PSW_P | PSW_D | PSW_I)

/* ssm/rsm instructions number PSW_W and PSW_E differently */
#define PSW_SM_I         PSW_I  /* Enable External Interrupts */
#define PSW_SM_D         PSW_D
#define PSW_SM_P         PSW_P
#define PSW_SM_Q         PSW_Q  /* Enable Interrupt State Collection */
#define PSW_SM_R         PSW_R  /* Enable Recover Counter Trap */
#ifdef TARGET_HPPA64
#define PSW_SM_E         0x100
#define PSW_SM_W         0x200  /* PA2.0 only : Enable Wide Mode */
#else
#define PSW_SM_E         0
#define PSW_SM_W         0
#endif

#define CR_RC            0
#define CR_PID1          8
#define CR_PID2          9
#define CR_PID3          12
#define CR_PID4          13
#define CR_SCRCCR        10
#define CR_SAR           11
#define CR_IVA           14
#define CR_EIEM          15
#define CR_IT            16
#define CR_IIASQ         17
#define CR_IIAOQ         18
#define CR_IIR           19
#define CR_ISR           20
#define CR_IOR           21
#define CR_IPSW          22
#define CR_EIRR          23

#if TARGET_REGISTER_BITS == 32
typedef uint32_t target_ureg;
typedef int32_t  target_sreg;
#define TREG_FMT_lx   "%08"PRIx32
#define TREG_FMT_ld   "%"PRId32
#else
typedef uint64_t target_ureg;
typedef int64_t  target_sreg;
#define TREG_FMT_lx   "%016"PRIx64
#define TREG_FMT_ld   "%"PRId64
#endif

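/*
 * Illustrative use of the format macros (hypothetical logging call):
 *     qemu_log("IAOQ_F " TREG_FMT_lx "\n", env->iaoq_f);
 * The same source line prints correctly at either register width.
 */
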
typedef struct HPPATLBEntry {
    uint64_t va_b;
    uint64_t va_e;
    target_ureg pa;
    unsigned u : 1;
    unsigned t : 1;
    unsigned d : 1;
    unsigned b : 1;
    unsigned page_size : 4;
    unsigned ar_type : 3;
    unsigned ar_pl1 : 2;
    unsigned ar_pl2 : 2;
    unsigned entry_valid : 1;
    unsigned access_id : 16;
} HPPATLBEntry;

typedef struct CPUArchState {
    target_ureg iaoq_f;      /* front */
    target_ureg iaoq_b;      /* back, aka next instruction */

    target_ureg gr[32];
    uint64_t fr[32];
    uint64_t sr[8];          /* stored shifted into place for gva */

    target_ureg psw;         /* All psw bits except the following: */
    target_ureg psw_n;       /* boolean */
    target_sreg psw_v;       /* in most significant bit */

    /* Splitting the carry-borrow field into the MSB and "the rest" allows
     * "the rest" to be deleted when it is unused, while the MSB remains in
     * use.  In addition, it's easier to compute carry-in for bit B+1 than
     * it is to compute carry-out for bit B (3 vs 4 insns for addition,
     * assuming the host has the appropriate add-with-carry insn to compute
     * the msb).  Therefore the carry bits are stored as:
     * cb_msb : cb & 0x11111110.
     */
    target_ureg psw_cb;      /* in least significant bit of next nibble */
    target_ureg psw_cb_msb;  /* boolean */

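    /*
     * For illustration: after an addition, the carry out of bit 3 (i.e.
     * the carry into bit 4) is stored in bit 4 of psw_cb, and likewise
     * for bits 8, 12, ... (the 0x11111110 pattern above), while the
     * carry out of the most significant bit is kept in psw_cb_msb.
     */
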
    uint64_t iasq_f;
    uint64_t iasq_b;

    uint32_t fr0_shadow;     /* flags, c, ca/cq, rm, d, enables */
    float_status fp_status;

    target_ureg cr[32];      /* control registers */
    target_ureg cr_back[2];  /* back of cr17/cr18 */
    target_ureg shadow[7];   /* shadow registers */

    /* ??? The number of entries isn't specified by the architecture. */
#ifdef TARGET_HPPA64
#define HPPA_BTLB_FIXED     0  /* BTLBs are not supported in 64-bit machines */
#else
#define HPPA_BTLB_FIXED     16
#endif
#define HPPA_BTLB_VARIABLE  0
#define HPPA_TLB_ENTRIES    256
#define HPPA_BTLB_ENTRIES   (HPPA_BTLB_FIXED + HPPA_BTLB_VARIABLE)

    /* ??? Implement a unified itlb/dtlb for the moment. */
    /* ??? We should use a more intelligent data structure. */
    HPPATLBEntry tlb[HPPA_TLB_ENTRIES];
    uint32_t tlb_last;
} CPUHPPAState;

/**
 * HPPACPU:
 * @env: #CPUHPPAState
 *
 * An HPPA CPU.
 */
struct ArchCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUHPPAState env;
    QEMUTimer *alarm_timer;
};

#include "exec/cpu-all.h"

static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return MMU_USER_IDX;
#else
    if (env->psw & (ifetch ? PSW_C : PSW_D)) {
        return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P);
    }
    return MMU_PHYS_IDX;  /* mmu disabled */
#endif
}

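/*
 * Example, following the definitions above: with address translation
 * enabled (PSW_C for ifetch, PSW_D for data), a kernel-privilege access
 * (IAOQ_F & 3 == 0) with PSW_P set uses MMU_KERNEL_P_IDX; with
 * translation disabled, everything goes through MMU_PHYS_IDX.
 */
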
void hppa_translate_init(void);

#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU

static inline target_ulong hppa_form_gva_psw(target_ureg psw, uint64_t spc,
                                             target_ureg off)
{
#ifdef CONFIG_USER_ONLY
    return off;
#else
    off &= (psw & PSW_W ? 0x3fffffffffffffffull : 0xffffffffull);
    return spc | off;
#endif
}

static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
                                         target_ureg off)
{
    return hppa_form_gva_psw(env->psw, spc, off);
}

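/*
 * For illustration: with PSW_W clear (narrow mode) the offset is truncated
 * to 32 bits, so the result is spc | (off & 0xffffffffull), with the space
 * bits (already shifted into place, see sr[] above) forming the high part
 * of the global virtual address.
 */
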
/*
 * Since PSW_{I,CB} will never need to be in tb->flags, reuse them.
 * TB_FLAG_SR_SAME indicates that SR4 through SR7 all contain the
 * same value.
 */
#define TB_FLAG_SR_SAME     PSW_I
#define TB_FLAG_PRIV_SHIFT  8
#define TB_FLAG_UNALIGN     0x400

static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
                                        uint64_t *cs_base, uint32_t *pflags)
{
    uint32_t flags = env->psw_n * PSW_N;

    /* TB lookup assumes that PC contains the complete virtual address.
       If we leave space+offset separate, we'll get ITLB misses to an
       incomplete virtual address.  This also means that we must separate
       out current cpu privilege from the low bits of IAOQ_F.  */
#ifdef CONFIG_USER_ONLY
    *pc = env->iaoq_f & -4;
    *cs_base = env->iaoq_b & -4;
    flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
#else
    /* ??? E, T, H, L, B bits need to be here, when implemented. */
    flags |= env->psw & (PSW_W | PSW_C | PSW_D | PSW_P);
    flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;

    *pc = (env->psw & PSW_C
           ? hppa_form_gva_psw(env->psw, env->iasq_f, env->iaoq_f & -4)
           : env->iaoq_f & -4);
    *cs_base = env->iasq_f;

    /* Insert a difference between IAOQ_B and IAOQ_F within the otherwise
       zero low 32 bits of CS_BASE.  This will succeed for all direct
       branches, which is the primary case we care about -- using goto_tb
       within a page.  Failure is indicated by a zero difference.  */
    if (env->iasq_f == env->iasq_b) {
        target_sreg diff = env->iaoq_b - env->iaoq_f;
        if (TARGET_REGISTER_BITS == 32 || diff == (int32_t)diff) {
            *cs_base |= (uint32_t)diff;
        }
    }
    if ((env->sr[4] == env->sr[5])
        & (env->sr[4] == env->sr[6])
        & (env->sr[4] == env->sr[7])) {
        flags |= TB_FLAG_SR_SAME;
    }
#endif

    *pflags = flags;
}

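/*
 * Illustration of the CS_BASE trick above: for sequential execution within
 * one space (iasq_f == iasq_b, iaoq_b == iaoq_f + 4), the low 32 bits of
 * cs_base become 4, recording where IAOQ_B sits relative to IAOQ_F so that
 * direct branches can use goto_tb; a zero difference means the back of the
 * queue could not be encoded and must be handled without that assumption.
 */
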
target_ureg cpu_hppa_get_psw(CPUHPPAState *env);
void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg);
void cpu_hppa_loaded_fr0(CPUHPPAState *env);

#ifdef CONFIG_USER_ONLY
static inline void cpu_hppa_change_prot_id(CPUHPPAState *env) { }
#else
void cpu_hppa_change_prot_id(CPUHPPAState *env);
#endif

int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
#ifndef CONFIG_USER_ONLY
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr);
void hppa_cpu_do_interrupt(CPUState *cpu);
bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry);
extern const MemoryRegionOps hppa_io_eir_ops;
extern const VMStateDescription vmstate_hppa_cpu;
void hppa_cpu_alarm_timer(void *);
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr);
#endif
G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra);

#endif /* HPPA_CPU_H */