1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 #ifndef _ASM_POWERPC_ASM_PROTOTYPES_H
3 #define _ASM_POWERPC_ASM_PROTOTYPES_H
/*
 * This file is for prototypes of C functions that are only called
 * from asm, and any associated variables.
 *
 * Copyright 2016, Daniel Axtens, IBM Corporation.
 */
11 #include <linux/threads.h>
12 #include <asm/cacheflush.h>
13 #include <asm/checksum.h>
14 #include <linux/uaccess.h>
15 #include <asm/epapr_hcalls.h>
17 #include <asm/mmu_context.h>
18 #include <asm/ultravisor-api.h>
20 #include <uapi/asm/ucontext.h>
23 extern struct task_struct
*current_set
[NR_CPUS
];
24 extern struct task_struct
*secondary_current
;
25 void start_secondary(void *unused
);
/* kexec: paca used by the kexec'd kernel and the asm-called copy/flush hook. */
extern struct paca_struct kexec_paca;
void kexec_copy_flush(struct kimage *image);
/* pseries hcall tracing */
extern struct static_key hcall_tracepoint_key;
void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
39 #if defined(CONFIG_PPC_POWERNV) || defined(CONFIG_PPC_SVM)
40 long ucall_norets(unsigned long opcode
, ...);
42 static inline long ucall_norets(unsigned long opcode
, ...)
44 return U_NOT_AVAILABLE
;
49 int64_t __opal_call(int64_t a0
, int64_t a1
, int64_t a2
, int64_t a3
,
50 int64_t a4
, int64_t a5
, int64_t a6
, int64_t a7
,
51 int64_t opcode
, uint64_t msr
);
/* VMX enable/disable bracketing around vectorized copy/ops routines. */
int enter_vmx_usercopy(void);
int exit_vmx_usercopy(void);
int enter_vmx_ops(void);
void *exit_vmx_ops(void *dest);
59 /* signals, syscalls and interrupts */
60 long sys_swapcontext(struct ucontext __user
*old_ctx
,
61 struct ucontext __user
*new_ctx
,
64 long sys_debug_setcontext(struct ucontext __user
*ctx
,
65 int ndbg
, struct sig_dbg_op __user
*dbg
);
67 ppc_select(int n
, fd_set __user
*inp
, fd_set __user
*outp
, fd_set __user
*exp
,
68 struct __kernel_old_timeval __user
*tvp
);
69 unsigned long __init
early_init(unsigned long dt_ptr
);
70 void __init
machine_init(u64 dt_ptr
);
72 long system_call_exception(long r3
, long r4
, long r5
, long r6
, long r7
, long r8
, unsigned long r0
, struct pt_regs
*regs
);
73 notrace
unsigned long syscall_exit_prepare(unsigned long r3
, struct pt_regs
*regs
, long scv
);
74 notrace
unsigned long interrupt_exit_user_prepare(struct pt_regs
*regs
);
75 notrace
unsigned long interrupt_exit_kernel_prepare(struct pt_regs
*regs
);
77 unsigned long syscall_exit_restart(unsigned long r3
, struct pt_regs
*regs
);
78 unsigned long interrupt_exit_user_restart(struct pt_regs
*regs
);
79 unsigned long interrupt_exit_kernel_restart(struct pt_regs
*regs
);
82 long ppc_fadvise64_64(int fd
, int advice
, u32 offset_high
, u32 offset_low
,
83 u32 len_high
, u32 len_low
);
84 long sys_switch_endian(void);
86 /* prom_init (OpenFirmware) */
87 unsigned long __init
prom_init(unsigned long r3
, unsigned long r4
,
89 unsigned long r6
, unsigned long r7
,
93 void __init
early_setup(unsigned long dt_ptr
);
94 void early_setup_secondary(void);
97 extern u64
__bswapdi2(u64
);
98 extern s64
__lshrdi3(s64
, int);
99 extern s64
__ashldi3(s64
, int);
100 extern s64
__ashrdi3(s64
, int);
101 extern int __cmpdi2(s64
, s64
);
102 extern int __ucmpdi2(u64
, u64
);
/*
 * Function-graph tracer hook called from asm.
 * NOTE(review): the declaration was truncated after "unsigned long ip,";
 * the final "unsigned long sp);" parameter was reconstructed — verify
 * against upstream.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp);
109 void pnv_power9_force_smt4_catch(void);
110 void pnv_power9_force_smt4_release(void);
112 /* Transaction memory related */
113 void tm_enable(void);
114 void tm_disable(void);
115 void tm_abort(uint8_t cause
);
118 void _kvmppc_restore_tm_pr(struct kvm_vcpu
*vcpu
, u64 guest_msr
);
119 void _kvmppc_save_tm_pr(struct kvm_vcpu
*vcpu
, u64 guest_msr
);
122 extern s32 patch__call_flush_branch_caches1
;
123 extern s32 patch__call_flush_branch_caches2
;
124 extern s32 patch__call_flush_branch_caches3
;
125 extern s32 patch__flush_count_cache_return
;
126 extern s32 patch__flush_link_stack_return
;
127 extern s32 patch__call_kvm_flush_link_stack
;
128 extern s32 patch__call_kvm_flush_link_stack_p9
;
129 extern s32 patch__memset_nocache
, patch__memcpy_nocache
;
131 extern long flush_branch_caches
;
132 extern long kvm_flush_link_stack
;
134 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
135 void kvmppc_save_tm_hv(struct kvm_vcpu
*vcpu
, u64 msr
, bool preserve_nv
);
136 void kvmppc_restore_tm_hv(struct kvm_vcpu
*vcpu
, u64 msr
, bool preserve_nv
);
138 static inline void kvmppc_save_tm_hv(struct kvm_vcpu
*vcpu
, u64 msr
,
139 bool preserve_nv
) { }
140 static inline void kvmppc_restore_tm_hv(struct kvm_vcpu
*vcpu
, u64 msr
,
141 bool preserve_nv
) { }
142 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
144 void kvmhv_save_host_pmu(void);
145 void kvmhv_load_host_pmu(void);
146 void kvmhv_save_guest_pmu(struct kvm_vcpu
*vcpu
, bool pmu_in_use
);
147 void kvmhv_load_guest_pmu(struct kvm_vcpu
*vcpu
);
149 void kvmppc_p9_enter_guest(struct kvm_vcpu
*vcpu
);
/* H_SET_DABR / H_SET_XDABR hypercall handlers. */
long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
			unsigned long dabrx);
155 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */