X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=target%2Fhppa%2Fcpu.h;h=aab251bc4bb90c81e89521934549100eb9c3c82e;hb=d730b9d1f25b83e1b0a7e8b75aec02b93b78bdc7;hp=f60435b9ec5d6b62234717478f0c79d34894b054;hpb=3d68ee7bbe34278d5792f5341ba0246069c6191c;p=mirror_qemu.git

diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h
index f60435b9ec..aab251bc4b 100644
--- a/target/hppa/cpu.h
+++ b/target/hppa/cpu.h
@@ -20,23 +20,17 @@
 #ifndef HPPA_CPU_H
 #define HPPA_CPU_H
 
-#include "qemu-common.h"
 #include "cpu-qom.h"
-
-#define TARGET_LONG_BITS 32
-#define TARGET_VIRT_ADDR_SPACE_BITS 32
-#define TARGET_REGISTER_BITS 32
-#define TARGET_PHYS_ADDR_SPACE_BITS 32
-
-#define CPUArchState struct CPUHPPAState
-
 #include "exec/cpu-defs.h"
-#include "fpu/softfloat.h"
 
-#define TARGET_PAGE_BITS 12
+/* PA-RISC 1.x processors have a strong memory model.  */
+/* ??? While we do not yet implement PA-RISC 2.0, those processors have
+   a weak memory model, but with TLB bits that force ordering on a per-page
+   basis.  It's probably easier to fall back to a strong memory model.  */
+#define TCG_GUEST_DEFAULT_MO  TCG_MO_ALL
 
 #define ALIGNED_ONLY
-#define NB_MMU_MODES 5
 #define MMU_KERNEL_IDX 0
 #define MMU_USER_IDX 3
 #define MMU_PHYS_IDX 4
@@ -123,6 +117,24 @@
 #define PSW_SM_W 0
 #endif
 
+#define CR_RC     0
+#define CR_PID1   8
+#define CR_PID2   9
+#define CR_PID3   12
+#define CR_PID4   13
+#define CR_SCRCCR 10
+#define CR_SAR    11
+#define CR_IVA    14
+#define CR_EIEM   15
+#define CR_IT     16
+#define CR_IIASQ  17
+#define CR_IIAOQ  18
+#define CR_IIR    19
+#define CR_ISR    20
+#define CR_IOR    21
+#define CR_IPSW   22
+#define CR_EIRR   23
+
 typedef struct CPUHPPAState CPUHPPAState;
 
 #if TARGET_REGISTER_BITS == 32
@@ -137,13 +149,26 @@ typedef int64_t target_sreg;
 #define TREG_FMT_ld "%"PRId64
 #endif
 
+typedef struct {
+    uint64_t va_b;
+    uint64_t va_e;
+    target_ureg pa;
+    unsigned u : 1;
+    unsigned t : 1;
+    unsigned d : 1;
+    unsigned b : 1;
+    unsigned page_size : 4;
+    unsigned ar_type : 3;
+    unsigned ar_pl1 : 2;
+    unsigned ar_pl2 : 2;
+    unsigned entry_valid : 1;
+    unsigned access_id : 16;
+} hppa_tlb_entry;
+
 struct CPUHPPAState {
     target_ureg gr[32];
     uint64_t fr[32];
-
-    target_ureg sar;
-    target_ureg cr26;
-    target_ureg cr27;
+    uint64_t sr[8];          /* stored shifted into place for gva */
 
     target_ureg psw;         /* All psw bits except the following:  */
     target_ureg psw_n;       /* boolean */
@@ -161,14 +186,21 @@ struct CPUHPPAState {
 
     target_ureg iaoq_f;      /* front */
     target_ureg iaoq_b;      /* back, aka next instruction */
-
-    target_ureg ior;         /* interrupt offset register */
+    uint64_t iasq_f;
+    uint64_t iasq_b;
 
     uint32_t fr0_shadow;     /* flags, c, ca/cq, rm, d, enables */
     float_status fp_status;
 
-    /* Those resources are used only in QEMU core */
-    CPU_COMMON
+    target_ureg cr[32];      /* control registers */
+    target_ureg cr_back[2];  /* back of cr17/cr18 */
+    target_ureg shadow[7];   /* shadow registers */
+
+    /* ??? The number of entries isn't specified by the architecture.  */
+    /* ??? Implement a unified itlb/dtlb for the moment.  */
+    /* ??? We should use a more intelligent data structure.  */
+    hppa_tlb_entry tlb[256];
+    uint32_t tlb_last;
 };
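The ??? comments above flag the flat 256-entry array as a stopgap, which implies a linear scan on lookup. A minimal sketch of such a scan follows; the helper name is invented here and only the hppa_tlb_entry fields declared above are assumed, so treat it as illustration rather than QEMU's actual lookup code:

    /* Illustrative only: scan the unified itlb/dtlb for a mapping of GVA.
       QEMU's real helper lives outside this header and may differ. */
    static hppa_tlb_entry *hppa_tlb_lookup_sketch(CPUHPPAState *env,
                                                  uint64_t gva)
    {
        size_t i;
        for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
            hppa_tlb_entry *ent = &env->tlb[i];
            /* va_b..va_e bound the mapped range; entry_valid gates it.  */
            if (ent->entry_valid && ent->va_b <= gva && gva <= ent->va_e) {
                return ent;   /* hit: caller still checks access_id etc. */
            }
        }
        return NULL;          /* miss: the caller raises a TLB fault */
    }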
 
 /**
@@ -182,16 +214,14 @@ struct HPPACPU {
     CPUState parent_obj;
 
     /*< public >*/
+    CPUNegativeOffsetState neg;
     CPUHPPAState env;
+    QEMUTimer *alarm_timer;
 };
 
-static inline HPPACPU *hppa_env_get_cpu(CPUHPPAState *env)
-{
-    return container_of(env, HPPACPU, env);
-}
-#define ENV_GET_CPU(e) CPU(hppa_env_get_cpu(e))
-#define ENV_OFFSET offsetof(HPPACPU, env)
+typedef CPUHPPAState CPUArchState;
+typedef HPPACPU ArchCPU;
 
 #include "exec/cpu-all.h"
 
@@ -209,35 +239,105 @@ static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
 
 void hppa_translate_init(void);
 
-#define cpu_init(cpu_model) cpu_generic_init(TYPE_HPPA_CPU, cpu_model)
+#define CPU_RESOLVING_TYPE TYPE_HPPA_CPU
 
-void hppa_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+static inline target_ulong hppa_form_gva_psw(target_ureg psw, uint64_t spc,
+                                             target_ureg off)
+{
+#ifdef CONFIG_USER_ONLY
+    return off;
+#else
+    off &= (psw & PSW_W ? 0x3fffffffffffffffull : 0xffffffffull);
+    return spc | off;
+#endif
+}
+
+static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc,
+                                         target_ureg off)
+{
+    return hppa_form_gva_psw(env->psw, spc, off);
+}
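The masking in hppa_form_gva_psw is easiest to see with numbers. A hedged example with invented values, assuming a system-mode build in which target_ulong is wide enough to keep the space bits:

    /* With PSW_W clear, the offset is truncated to 32 bits and merged
       below the pre-shifted space id (cf. the sr[] comment above).
       With PSW_W set, the offset keeps its low 62 bits instead. */
    uint64_t spc = 0x4321ULL << 32;       /* example space id, pre-shifted */
    target_ureg off = 0xdeadbeef;         /* example offset */
    target_ulong gva = hppa_form_gva_psw(0, spc, off);
    /* gva == 0x00004321deadbeef (PSW_W clear, CONFIG_USER_ONLY not set) */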
 
+/* Since PSW_{I,CB} will never need to be in tb->flags, reuse them.
+ * TB_FLAG_SR_SAME indicates that SR4 through SR7 all contain the
+ * same value.
+ */
+#define TB_FLAG_SR_SAME     PSW_I
+#define TB_FLAG_PRIV_SHIFT  8
 
 static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc,
                                         target_ulong *cs_base,
                                         uint32_t *pflags)
 {
-    *pc = env->iaoq_f;
-    *cs_base = env->iaoq_b;
+    uint32_t flags = env->psw_n * PSW_N;
+
+    /* TB lookup assumes that PC contains the complete virtual address.
+       If we leave space+offset separate, we'll get ITLB misses to an
+       incomplete virtual address.  This also means that we must separate
+       out the current cpu privilege from the low bits of IAOQ_F.  */
+#ifdef CONFIG_USER_ONLY
+    *pc = env->iaoq_f & -4;
+    *cs_base = env->iaoq_b & -4;
+#else
     /* ??? E, T, H, L, B, P bits need to be here, when implemented.  */
-    *pflags = (env->psw & (PSW_W | PSW_C | PSW_D))
-            | env->psw_n * PSW_N;
+    flags |= env->psw & (PSW_W | PSW_C | PSW_D);
+    flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;
+
+    *pc = (env->psw & PSW_C
+           ? hppa_form_gva_psw(env->psw, env->iasq_f, env->iaoq_f & -4)
+           : env->iaoq_f & -4);
+    *cs_base = env->iasq_f;
+
+    /* Insert a difference between IAOQ_B and IAOQ_F within the otherwise
+       zero low 32 bits of CS_BASE.  This will succeed for all direct
+       branches, which is the primary case we care about -- using goto_tb
+       within a page.  Failure is indicated by a zero difference.  */
+    if (env->iasq_f == env->iasq_b) {
+        target_sreg diff = env->iaoq_b - env->iaoq_f;
+        if (TARGET_REGISTER_BITS == 32 || diff == (int32_t)diff) {
+            *cs_base |= (uint32_t)diff;
+        }
+    }
+    if ((env->sr[4] == env->sr[5])
+        & (env->sr[4] == env->sr[6])
+        & (env->sr[4] == env->sr[7])) {
+        flags |= TB_FLAG_SR_SAME;
+    }
+#endif
+
+    *pflags = flags;
 }
 
 target_ureg cpu_hppa_get_psw(CPUHPPAState *env);
 void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg);
 void cpu_hppa_loaded_fr0(CPUHPPAState *env);
 
+#ifdef CONFIG_USER_ONLY
+static inline void cpu_hppa_change_prot_id(CPUHPPAState *env) { }
+#else
+void cpu_hppa_change_prot_id(CPUHPPAState *env);
+#endif
+
 #define cpu_signal_handler cpu_hppa_signal_handler
 int cpu_hppa_signal_handler(int host_signum, void *pinfo, void *puc);
-int hppa_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
-                              int rw, int midx);
 hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr);
 int hppa_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void hppa_cpu_do_interrupt(CPUState *cpu);
 bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req);
-void hppa_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function, int);
+void hppa_cpu_dump_state(CPUState *cs, FILE *f, int);
+bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                       MMUAccessType access_type, int mmu_idx,
+                       bool probe, uintptr_t retaddr);
+#ifndef CONFIG_USER_ONLY
+int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
+                              int type, hwaddr *pphys, int *pprot);
+extern const MemoryRegionOps hppa_io_eir_ops;
+extern const struct VMStateDescription vmstate_hppa_cpu;
+void hppa_cpu_alarm_timer(void *);
+int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr);
+#endif
+void QEMU_NORETURN hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra);
 
 #endif /* HPPA_CPU_H */
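For completeness, the cs_base packing added in cpu_get_tb_cpu_state above has to be undone by its consumer: the high bits carry IASQ_F, and the sign-extended low 32 bits carry IAOQ_B - IAOQ_F, with zero meaning the back of the queue could not be encoded. A hedged sketch of that decode for the system-mode case (the function name and the bool convention are invented; the real decode lives in the translator, not in this header):

    /* Illustrative only: recover IAOQ_B from a packed cs_base. */
    static inline bool hppa_unpack_cs_base_sketch(target_ulong cs_base,
                                                  target_ureg iaoq_f,
                                                  target_ureg *iaoq_b)
    {
        int32_t diff = (int32_t)cs_base;   /* sign-extend the low 32 bits */
        if (diff == 0) {
            return false;                  /* IAOQ_B unknown: treat as dynamic */
        }
        *iaoq_b = iaoq_f + diff;           /* direct branch within reach */
        return true;
    }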