/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PROCESSOR_H
#define _ASM_POWERPC_PROCESSOR_H

/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 */

#include <asm/reg.h>

#ifdef CONFIG_VSX
#define TS_FPRWIDTH 2

#ifdef __BIG_ENDIAN__
#define TS_FPROFFSET 0
#define TS_VSRLOWOFFSET 1
#else
#define TS_FPROFFSET 1
#define TS_VSRLOWOFFSET 0
#endif

#else
#define TS_FPRWIDTH 1
#define TS_FPROFFSET 0
#endif
#ifdef CONFIG_PPC64
/* Default SMT priority is set to 3; the priority is stored in PPR bits 11-13. */
#define PPR_PRIORITY 3
#ifdef __ASSEMBLY__
#define DEFAULT_PPR (PPR_PRIORITY << 50)
#else
#define DEFAULT_PPR ((u64)PPR_PRIORITY << 50)
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PPC64 */
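
/*
 * Illustrative note: in IBM (big-endian) bit numbering of the 64-bit
 * PPR, a left shift by 50 places the 3-bit priority field at
 * PPR[11:13], so DEFAULT_PPR == 0x000C000000000000.
 */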

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/thread_info.h>
#include <asm/ptrace.h>
#include <asm/hw_breakpoint.h>

/*
 * We do _not_ want to define new machine types at all; those must die
 * in favor of using the device tree.
 *		-- BenH
 */

/* PREP sub-platform types. Unused. */
#define _PREP_Motorola	0x01	/* motorola prep */
#define _PREP_Firm	0x02	/* firmworks prep */
#define _PREP_IBM	0x00	/* ibm prep */
#define _PREP_Bull	0x03	/* bull prep */

/* CHRP sub-platform types. These are arbitrary. */
#define _CHRP_Motorola	0x04	/* motorola chrp, the cobra */
#define _CHRP_IBM	0x05	/* IBM chrp, the longtrail and longtrail 2 */
#define _CHRP_Pegasos	0x06	/* Genesi/bplan's Pegasos and Pegasos2 */
#define _CHRP_briq	0x07	/* TotalImpact's briQ */

#if defined(__KERNEL__) && defined(CONFIG_PPC32)

extern int _chrp_type;

#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */

/* Macros for adjusting thread priority (hardware multi-threading) */
#define HMT_very_low()	 asm volatile("or 31,31,31	# very low priority")
#define HMT_low()	 asm volatile("or 1,1,1		# low priority")
#define HMT_medium_low() asm volatile("or 6,6,6		# medium low priority")
#define HMT_medium()	 asm volatile("or 2,2,2		# medium priority")
#define HMT_medium_high() asm volatile("or 5,5,5	# medium high priority")
#define HMT_high()	 asm volatile("or 3,3,3		# high priority")
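
/*
 * Illustrative sketch (not part of this header): a polling loop would
 * typically drop to low priority while it spins and restore medium
 * priority once its condition is met (flag below is hypothetical):
 *
 *	HMT_low();
 *	while (!READ_ONCE(flag))
 *		barrier();
 *	HMT_medium();
 *
 * On PPC64, cpu_relax() and the spin_begin()/spin_end() helpers below
 * package up exactly this pattern.
 */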

#ifdef __KERNEL__

#ifdef CONFIG_PPC64
#include <asm/task_size_64.h>
#else
#include <asm/task_size_32.h>
#endif

struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
#define TS_CKFPR(i) ckfp_state.fpr[i][TS_FPROFFSET]

/* FP and VSX 0-31 register set */
struct thread_fp_state {
	u64 fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
	u64 fpscr;		/* Floating point status */
};
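
/*
 * Illustrative note: under CONFIG_VSX each fpr[] entry is a two-u64
 * slot holding a full 128-bit VSR, and the classic 64-bit FPR lives in
 * one half of it (which half depends on endianness, hence TS_FPROFFSET
 * above). Kernel code therefore reads thread FPR n as, for example,
 *
 *	u64 val = tsk->thread.TS_FPR(n);
 *
 * which expands to tsk->thread.fp_state.fpr[n][TS_FPROFFSET].
 */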

/* Complete AltiVec register set including VSCR */
struct thread_vr_state {
	vector128 vr[32] __attribute__((aligned(16)));
	vector128 vscr __attribute__((aligned(16)));
};
struct debug_reg {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * The following help to manage the use of Debug Control Registers
	 * on the BookE platforms.
	 */
	uint32_t dbcr0;
	uint32_t dbcr1;
#ifdef CONFIG_BOOKE
	uint32_t dbcr2;
#endif
	/*
	 * The stored value of the DBSR register is its value at the last
	 * debug interrupt. The user can read this register but never
	 * write it; its value helps describe the reason for the last
	 * debug trap.
	 */
	uint32_t dbsr;
	/*
	 * The following contain addresses used by debug applications to
	 * help trace and trap on particular address locations.
	 * The bits in the Debug Control Registers above define which of
	 * the following registers contain valid data and/or addresses.
	 */
	unsigned long iac1;
	unsigned long iac2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	unsigned long iac3;
	unsigned long iac4;
#endif
	unsigned long dac1;
	unsigned long dac2;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	unsigned long dvc1;
	unsigned long dvc2;
#endif
#endif
};

struct thread_struct {
	unsigned long ksp;		/* Kernel stack pointer */

#ifdef CONFIG_PPC64
	unsigned long ksp_vsid;
#endif
	struct pt_regs *regs;		/* Pointer to saved register state */
	mm_segment_t addr_limit;	/* for get_fs() validation */
#ifdef CONFIG_BOOKE
	/* BookE base exception scratch space; align on cacheline */
	unsigned long normsave[8] ____cacheline_aligned;
#endif
#ifdef CONFIG_PPC32
	void *pgdir;			/* root of page-table tree */
	unsigned long ksp_limit;	/* if ksp <= ksp_limit stack overflow */
#ifdef CONFIG_PPC_RTAS
	unsigned long rtas_sp;		/* stack pointer for when in RTAS */
#endif
#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
	unsigned long kuap;		/* opened segments for user access */
#endif
	/* Debug Registers */
	struct debug_reg debug;
	struct thread_fp_state fp_state;
	struct thread_fp_state *fp_save_area;
	int fpexc_mode;			/* floating-point exception mode */
	unsigned int align_ctl;		/* alignment handling control */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	struct perf_event *ptrace_bps[HBP_NUM];
	/*
	 * Helps identify source of single-step exception and subsequent
	 * hw-breakpoint enablement
	 */
	struct perf_event *last_hit_ubp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
	unsigned long trap_nr;		/* last trap # on this thread */
	u8 load_slb;			/* Ages out SLB preload cache entries */
	u8 load_fp;
#ifdef CONFIG_ALTIVEC
	u8 load_vec;
	struct thread_vr_state vr_state;
	struct thread_vr_state *vr_save_area;
	unsigned long vrsave;
	int used_vr;			/* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	/* VSR status */
	int used_vsr;			/* set if process has used VSX */
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	unsigned long evr[32];		/* upper 32-bits of SPE regs */
	u64 acc;			/* Accumulator */
	unsigned long spefscr;		/* SPE & eFP status */
	unsigned long spefscr_last;	/* SPEFSCR value on last prctl
					   call or trap return */
	int used_spe;			/* set if process has used spe */
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	u8 load_tm;
	u64 tm_tfhar;			/* Transaction fail handler addr */
	u64 tm_texasr;			/* Transaction exception & summary */
	u64 tm_tfiar;			/* Transaction fail instr address reg */
	struct pt_regs ckpt_regs;	/* Checkpointed registers */

	unsigned long tm_tar;
	unsigned long tm_ppr;
	unsigned long tm_dscr;
	/*
	 * Checkpointed FP and VSX 0-31 register set.
	 *
	 * When a transaction is active/signalled/scheduled etc., *regs
	 * holds the most recent (speculated) GPRs, while ckpt_regs holds
	 * the older checkpointed regs to which we roll back if the
	 * transaction aborts.
	 *
	 * ckfp_state and ckvr_state relate to fp_state and vr_state in
	 * the same way ckpt_regs relates to *regs.
	 */
	struct thread_fp_state ckfp_state; /* Checkpointed FP state */
	struct thread_vr_state ckvr_state; /* Checkpointed VR state */
	unsigned long ckvrsave;		/* Checkpointed VRSAVE */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC_MEM_KEYS
	unsigned long amr;
	unsigned long iamr;
	unsigned long uamor;
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	void *kvm_shadow_vcpu;		/* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
	struct kvm_vcpu *kvm_vcpu;
#endif
#ifdef CONFIG_PPC64
	unsigned long dscr;
	unsigned long fscr;
	/*
	 * dscr_inherit indicates that the process has explicitly changed
	 * the DSCR value for itself, so the kernel no longer uses the
	 * default CPU DSCR value from the PACA structure on context
	 * switch. Once set, this behaviour is also inherited by all
	 * children of the process from that point onwards.
	 */
	int dscr_inherit;
	unsigned long tidr;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long tar;
	unsigned long ebbrr;
	unsigned long ebbhr;
	unsigned long bescr;
	unsigned long siar;
	unsigned long sdar;
	unsigned long sier;
	unsigned long mmcr2;
	unsigned int mmcr0;

	unsigned int used_ebb;
	unsigned int used_vas;
#endif
};

#define ARCH_MIN_TASKALIGN 16

#define INIT_SP		(sizeof(init_stack) + (unsigned long)&init_stack)
#define INIT_SP_LIMIT	((unsigned long)&init_stack)

#ifdef CONFIG_SPE
#define SPEFSCR_INIT \
	.spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE, \
	.spefscr_last = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
#else
#define SPEFSCR_INIT
#endif

#ifdef CONFIG_PPC32
#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.ksp_limit = INIT_SP_LIMIT, \
	.addr_limit = KERNEL_DS, \
	.pgdir = swapper_pg_dir, \
	.fpexc_mode = MSR_FE0 | MSR_FE1, \
	SPEFSCR_INIT \
}
#else
#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
	.addr_limit = KERNEL_DS, \
	.fpexc_mode = 0, \
	.fscr = FSCR_TAR | FSCR_EBB \
}
#endif

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.regs)

unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)	((tsk)->thread.regs ? (tsk)->thread.regs->nip : 0)
#define KSTK_ESP(tsk)	((tsk)->thread.regs ? (tsk)->thread.regs->gpr[1] : 0)

/* Get/set floating-point exception mode */
#define GET_FPEXC_CTL(tsk, adr)	get_fpexc_mode((tsk), (adr))
#define SET_FPEXC_CTL(tsk, val)	set_fpexc_mode((tsk), (val))

extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
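
/*
 * Illustrative sketch: these back the PR_GET_FPEXC/PR_SET_FPEXC
 * prctl() interface. From user space (the PR_FP_EXC_* modes come from
 * <linux/prctl.h>) one might request precise FP trapping with:
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 */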

#define GET_ENDIAN(tsk, adr)	get_endian((tsk), (adr))
#define SET_ENDIAN(tsk, val)	set_endian((tsk), (val))

extern int get_endian(struct task_struct *tsk, unsigned long adr);
extern int set_endian(struct task_struct *tsk, unsigned int val);

#define GET_UNALIGN_CTL(tsk, adr)	get_unalign_ctl((tsk), (adr))
#define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))

extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);

extern void load_fp_state(struct thread_fp_state *fp);
extern void store_fp_state(struct thread_fp_state *fp);
extern void load_vr_state(struct thread_vr_state *vr);
extern void store_vr_state(struct thread_vr_state *vr);

static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}

static inline unsigned long __pack_fe01(unsigned int fpmode)
{
	return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}
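
/*
 * Worked example: MSR_FE0 is bit 11 (0x800) and MSR_FE1 is bit 8
 * (0x100), so an MSR word with both bits set unpacks to fpmode 3
 * ((0x800 >> 10) | (0x100 >> 8) == 2 | 1), and __pack_fe01(3) puts
 * both bits back; the two helpers are exact inverses on the FE0/FE1
 * field.
 */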

#ifdef CONFIG_PPC64
#define cpu_relax()	do { HMT_low(); HMT_medium(); barrier(); } while (0)

#define spin_begin()	HMT_low()

#define spin_cpu_relax()	barrier()

#define spin_end()	HMT_medium()

#define spin_until_cond(cond)					\
do {								\
	if (unlikely(!(cond))) {				\
		spin_begin();					\
		do {						\
			spin_cpu_relax();			\
		} while (!(cond));				\
		spin_end();					\
	}							\
} while (0)
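
/*
 * Illustrative sketch: a caller waiting for another CPU to publish a
 * (hypothetical) flag would write
 *
 *	spin_until_cond(READ_ONCE(flag));
 *
 * which spins at low SMT priority between evaluations of the condition
 * and returns to medium priority once it holds.
 */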

#else
#define cpu_relax()	barrier()
#endif

/* Check that a certain kernel stack pointer is valid in task_struct p */
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes);

/*
 * Prefetch macros.
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

static inline void prefetch(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}

static inline void prefetchw(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}
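
/*
 * Illustrative sketch: a walker over a (hypothetical) linked list
 * might hint each next node into the data cache ahead of use; dcbt is
 * only a performance hint and never faults, and the NULL check above
 * makes prefetching a missing next pointer harmless:
 *
 *	for (p = head; p; p = p->next)
 *		prefetch(p->next);
 */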

#define spin_lock_prefetch(x)	prefetchw(x)

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#ifdef CONFIG_PPC64
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
	if (is_32)
		return sp & 0x0ffffffffUL;
	return sp;
}
#else
static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
{
	return sp;
}
#endif
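
/*
 * Illustrative note: on a 64-bit kernel the stack pointer of a 32-bit
 * task is truncated to the low 4GB, e.g.
 * get_clean_sp(0x12345678abcdUL, 1) == 0x5678abcdUL, while a 64-bit
 * task gets its sp back unchanged.
 */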

/* asm stubs */
extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
extern unsigned long isa206_idle_insn_mayloss(unsigned long type);

extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};

extern int powersave_nap;	/* set if nap mode can be used in idle loop */

extern void power7_idle_type(unsigned long type);
extern void power9_idle_type(unsigned long stop_psscr_val,
			     unsigned long stop_psscr_mask);

extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to);
extern void cvt_df(double *from, float *to);
extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware. On the other hand,
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers-of-2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0
#endif

#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PROCESSOR_H */