1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 #ifndef _ASM_POWERPC_ASM_PROTOTYPES_H
3 #define _ASM_POWERPC_ASM_PROTOTYPES_H
4 /*
5 * This file is for prototypes of C functions that are only called
6 * from asm, and any associated variables.
7 *
8 * Copyright 2016, Daniel Axtens, IBM Corporation.
9 */
10
11 #include <linux/threads.h>
12 #include <asm/cacheflush.h>
13 #include <asm/checksum.h>
14 #include <linux/uaccess.h>
15 #include <asm/epapr_hcalls.h>
16 #include <asm/dcr.h>
17 #include <asm/mmu_context.h>
18 #include <asm/ultravisor-api.h>
19
20 #include <uapi/asm/ucontext.h>
21
22 /* SMP */
23 extern struct task_struct *current_set[NR_CPUS];
24 extern struct task_struct *secondary_current;
25 void start_secondary(void *unused);
26
27 /* kexec */
28 struct paca_struct;
29 struct kimage;
30 extern struct paca_struct kexec_paca;
31 void kexec_copy_flush(struct kimage *image);
32
33 /* pseries hcall tracing */
34 extern struct static_key hcall_tracepoint_key;
35 void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
36 void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
37
38 /* Ultravisor */
39 #if defined(CONFIG_PPC_POWERNV) || defined(CONFIG_PPC_SVM)
40 long ucall_norets(unsigned long opcode, ...);
41 #else
/*
 * Stub for kernels built without ultravisor support (neither
 * CONFIG_PPC_POWERNV nor CONFIG_PPC_SVM): there is no ultravisor to
 * call, so report the service as unavailable to the caller.
 */
static inline long ucall_norets(unsigned long opcode, ...)
{
	return U_NOT_AVAILABLE;
}
46 #endif
47
48 /* OPAL */
49 int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
50 int64_t a4, int64_t a5, int64_t a6, int64_t a7,
51 int64_t opcode, uint64_t msr);
52
53 /* VMX copying */
54 int enter_vmx_usercopy(void);
55 int exit_vmx_usercopy(void);
56 int enter_vmx_ops(void);
57 void *exit_vmx_ops(void *dest);
58
59 /* Traps */
60 long machine_check_early(struct pt_regs *regs);
61 long hmi_exception_realmode(struct pt_regs *regs);
62 void SMIException(struct pt_regs *regs);
63 void handle_hmi_exception(struct pt_regs *regs);
64 void instruction_breakpoint_exception(struct pt_regs *regs);
65 void RunModeException(struct pt_regs *regs);
66 void single_step_exception(struct pt_regs *regs);
67 void program_check_exception(struct pt_regs *regs);
68 void alignment_exception(struct pt_regs *regs);
69 void StackOverflow(struct pt_regs *regs);
70 void stack_overflow_exception(struct pt_regs *regs);
71 void kernel_fp_unavailable_exception(struct pt_regs *regs);
72 void altivec_unavailable_exception(struct pt_regs *regs);
73 void vsx_unavailable_exception(struct pt_regs *regs);
74 void fp_unavailable_tm(struct pt_regs *regs);
75 void altivec_unavailable_tm(struct pt_regs *regs);
76 void vsx_unavailable_tm(struct pt_regs *regs);
77 void facility_unavailable_exception(struct pt_regs *regs);
78 void TAUException(struct pt_regs *regs);
79 void altivec_assist_exception(struct pt_regs *regs);
80 void unrecoverable_exception(struct pt_regs *regs);
81 void kernel_bad_stack(struct pt_regs *regs);
82 void system_reset_exception(struct pt_regs *regs);
83 void machine_check_exception(struct pt_regs *regs);
84 void emulation_assist_interrupt(struct pt_regs *regs);
85 long do_slb_fault(struct pt_regs *regs, unsigned long ea);
86 void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err);
87
88 /* signals, syscalls and interrupts */
89 long sys_swapcontext(struct ucontext __user *old_ctx,
90 struct ucontext __user *new_ctx,
91 long ctx_size);
92 #ifdef CONFIG_PPC32
93 long sys_debug_setcontext(struct ucontext __user *ctx,
94 int ndbg, struct sig_dbg_op __user *dbg);
95 int
96 ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
97 struct __kernel_old_timeval __user *tvp);
98 unsigned long __init early_init(unsigned long dt_ptr);
99 void __init machine_init(u64 dt_ptr);
100 #endif
101 long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, unsigned long r0, struct pt_regs *regs);
102 notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
103 notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr);
104 notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr);
105
106 long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
107 u32 len_high, u32 len_low);
108 long sys_switch_endian(void);
109 notrace unsigned int __check_irq_replay(void);
110 void notrace restore_interrupts(void);
111
112 /* prom_init (OpenFirmware) */
113 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
114 unsigned long pp,
115 unsigned long r6, unsigned long r7,
116 unsigned long kbase);
117
118 /* setup */
119 void __init early_setup(unsigned long dt_ptr);
120 void early_setup_secondary(void);
121
122 /* misc runtime */
123 extern u64 __bswapdi2(u64);
124 extern s64 __lshrdi3(s64, int);
125 extern s64 __ashldi3(s64, int);
126 extern s64 __ashrdi3(s64, int);
127 extern int __cmpdi2(s64, s64);
128 extern int __ucmpdi2(u64, u64);
129
130 /* tracing */
131 void _mcount(void);
132 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
133 unsigned long sp);
134
135 void pnv_power9_force_smt4_catch(void);
136 void pnv_power9_force_smt4_release(void);
137
138 /* Transaction memory related */
139 void tm_enable(void);
140 void tm_disable(void);
141 void tm_abort(uint8_t cause);
142
143 struct kvm_vcpu;
144 void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
145 void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
146
147 /* Patch sites */
148 extern s32 patch__call_flush_branch_caches1;
149 extern s32 patch__call_flush_branch_caches2;
150 extern s32 patch__call_flush_branch_caches3;
151 extern s32 patch__flush_count_cache_return;
152 extern s32 patch__flush_link_stack_return;
153 extern s32 patch__call_kvm_flush_link_stack;
154 extern s32 patch__memset_nocache, patch__memcpy_nocache;
155
156 extern long flush_branch_caches;
157 extern long kvm_flush_link_stack;
158
159 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
160 void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
161 void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
162 #else
/* No-op stub when CONFIG_PPC_TRANSACTIONAL_MEM is not set: there is no
 * TM state to save, so callers can call this unconditionally. */
static inline void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
				     bool preserve_nv) { }
/* No-op stub when CONFIG_PPC_TRANSACTIONAL_MEM is not set: there is no
 * TM state to restore, so callers can call this unconditionally. */
static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
				     bool preserve_nv) { }
167 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
168
169 void kvmhv_save_host_pmu(void);
170 void kvmhv_load_host_pmu(void);
171 void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
172 void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
173
174 int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
175
176 long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
177 long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
178 unsigned long dabrx);
179
180 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
181