1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Based on arch/arm/include/asm/ptrace.h
4  *
5  * Copyright (C) 1996-2003 Russell King
6  * Copyright (C) 2012 ARM Ltd.
7  */
8 #ifndef __ASM_PTRACE_H
9 #define __ASM_PTRACE_H
10 
11 #include <asm/cpufeature.h>
12 
13 #include <uapi/asm/ptrace.h>
14 
/* Current Exception Level values, as contained in CurrentEL */
#define CurrentEL_EL1		(1 << 2)
#define CurrentEL_EL2		(2 << 2)

/*
 * PMR values used to mask/unmask interrupts.
 *
 * GIC priority masking works as follows: if an IRQ's priority is a higher value
 * than the value held in PMR, that IRQ is masked. Lowering the value of PMR
 * means masking more IRQs (or at least that the same IRQs remain masked).
 *
 * To mask interrupts, we clear the most significant bit of PMR.
 *
 * Some code sections either automatically switch back to PSR.I or explicitly
 * require to not use priority masking. If bit GIC_PRIO_PSR_I_SET is included
 * in the priority mask, it indicates that PSR.I should be set and
 * interrupt disabling temporarily does not rely on IRQ priorities.
 */
#define GIC_PRIO_IRQON			0xe0
#define __GIC_PRIO_IRQOFF		(GIC_PRIO_IRQON & ~0x80)	/* 0x60 */
#define __GIC_PRIO_IRQOFF_NS		0xa0
#define GIC_PRIO_PSR_I_SET		(1 << 4)

/*
 * Runtime-selected "IRQs off" PMR value: when the gic_nonsecure_priorities
 * static key is enabled, the non-secure variant is used instead of the
 * default __GIC_PRIO_IRQOFF.
 */
#define GIC_PRIO_IRQOFF							\
	({								\
		extern struct static_key_false gic_nonsecure_priorities;\
		u8 __prio = __GIC_PRIO_IRQOFF;				\
									\
		if (static_branch_unlikely(&gic_nonsecure_priorities))	\
			__prio = __GIC_PRIO_IRQOFF_NS;			\
									\
		__prio;							\
	})
48 
/* Additional SPSR bits not exposed in the UABI */
#define PSR_MODE_THREAD_BIT	(1 << 0)
#define PSR_IL_BIT		(1 << 20)

/* AArch32-specific ptrace requests */
#define COMPAT_PTRACE_GETREGS		12
#define COMPAT_PTRACE_SETREGS		13
#define COMPAT_PTRACE_GET_THREAD_AREA	22
#define COMPAT_PTRACE_SET_SYSCALL	23
#define COMPAT_PTRACE_GETVFPREGS	27
#define COMPAT_PTRACE_SETVFPREGS	28
#define COMPAT_PTRACE_GETHBPREGS	29
#define COMPAT_PTRACE_SETHBPREGS	30

/* SPSR_ELx bits for exceptions taken from AArch32 */
#define PSR_AA32_MODE_MASK	0x0000001f
#define PSR_AA32_MODE_USR	0x00000010
#define PSR_AA32_MODE_FIQ	0x00000011
#define PSR_AA32_MODE_IRQ	0x00000012
#define PSR_AA32_MODE_SVC	0x00000013
#define PSR_AA32_MODE_ABT	0x00000017
#define PSR_AA32_MODE_HYP	0x0000001a
#define PSR_AA32_MODE_UND	0x0000001b
#define PSR_AA32_MODE_SYS	0x0000001f
#define PSR_AA32_T_BIT		0x00000020
#define PSR_AA32_F_BIT		0x00000040
#define PSR_AA32_I_BIT		0x00000080
#define PSR_AA32_A_BIT		0x00000100
#define PSR_AA32_E_BIT		0x00000200
#define PSR_AA32_PAN_BIT	0x00400000
#define PSR_AA32_SSBS_BIT	0x00800000
#define PSR_AA32_DIT_BIT	0x01000000
#define PSR_AA32_Q_BIT		0x08000000
#define PSR_AA32_V_BIT		0x10000000
#define PSR_AA32_C_BIT		0x20000000
#define PSR_AA32_Z_BIT		0x40000000
#define PSR_AA32_N_BIT		0x80000000
#define PSR_AA32_IT_MASK	0x0600fc00	/* If-Then execution state mask */
#define PSR_AA32_GE_MASK	0x000f0000

/* Default endianness (E) state: the E bit is set only on big-endian kernels. */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define PSR_AA32_ENDSTATE	PSR_AA32_E_BIT
#else
#define PSR_AA32_ENDSTATE	0
#endif

/* AArch32 CPSR bits, as seen in AArch32 */
#define COMPAT_PSR_DIT_BIT	0x00200000

/*
 * These are 'magic' values for PTRACE_PEEKUSR that return info about where a
 * process is located in memory.
 */
#define COMPAT_PT_TEXT_ADDR		0x10000
#define COMPAT_PT_DATA_ADDR		0x10004
#define COMPAT_PT_TEXT_END_ADDR		0x10008

/*
 * If pt_regs.syscallno == NO_SYSCALL, then the thread is not executing
 * a syscall -- i.e., its most recent entry into the kernel from
 * userspace was not via SVC, or otherwise a tracer cancelled the syscall.
 *
 * This must have the value -1, for ABI compatibility with ptrace etc.
 */
#define NO_SYSCALL (-1)
114 
115 #ifndef __ASSEMBLY__
116 #include <linux/bug.h>
117 #include <linux/types.h>
118 
/* sizeof(struct user) for AArch32 */
#define COMPAT_USER_SZ	296

/*
 * Architecturally defined mapping between AArch32 and AArch64 registers:
 * r0-r14 map onto x0-x14, and the AArch32 banked registers (per-mode SP/LR
 * and the FIQ-banked r8-r12) map onto x15-x30.
 */
#define compat_usr(x)	regs[(x)]
#define compat_fp	regs[11]
#define compat_sp	regs[13]
#define compat_lr	regs[14]
#define compat_sp_hyp	regs[15]
#define compat_lr_irq	regs[16]
#define compat_sp_irq	regs[17]
#define compat_lr_svc	regs[18]
#define compat_sp_svc	regs[19]
#define compat_lr_abt	regs[20]
#define compat_sp_abt	regs[21]
#define compat_lr_und	regs[22]
#define compat_sp_und	regs[23]
#define compat_r8_fiq	regs[24]
#define compat_r9_fiq	regs[25]
#define compat_r10_fiq	regs[26]
#define compat_r11_fiq	regs[27]
#define compat_r12_fiq	regs[28]
#define compat_sp_fiq	regs[29]
#define compat_lr_fiq	regs[30]
143 
compat_psr_to_pstate(const unsigned long psr)144 static inline unsigned long compat_psr_to_pstate(const unsigned long psr)
145 {
146 	unsigned long pstate;
147 
148 	pstate = psr & ~COMPAT_PSR_DIT_BIT;
149 
150 	if (psr & COMPAT_PSR_DIT_BIT)
151 		pstate |= PSR_AA32_DIT_BIT;
152 
153 	return pstate;
154 }
155 
pstate_to_compat_psr(const unsigned long pstate)156 static inline unsigned long pstate_to_compat_psr(const unsigned long pstate)
157 {
158 	unsigned long psr;
159 
160 	psr = pstate & ~PSR_AA32_DIT_BIT;
161 
162 	if (pstate & PSR_AA32_DIT_BIT)
163 		psr |= COMPAT_PSR_DIT_BIT;
164 
165 	return psr;
166 }
167 
/*
 * This struct defines the way the registers are stored on the stack during an
 * exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for
 * stack alignment). struct user_pt_regs must form a prefix of struct pt_regs.
 */
struct pt_regs {
	union {
		/* UABI view of the register file (must stay a prefix). */
		struct user_pt_regs user_regs;
		struct {
			u64 regs[31];	/* x0..x30 */
			u64 sp;
			u64 pc;
			u64 pstate;
		};
	};
	/* NOTE(review): presumably x0 as at syscall entry, kept for syscall
	 * restart — confirm against the entry code. */
	u64 orig_x0;
#ifdef __AARCH64EB__
	/* Field order swaps with kernel endianness (see the #ifdef). */
	u32 unused2;
	s32 syscallno;
#else
	s32 syscallno;	/* NO_SYSCALL when not in a syscall; see in_syscall() */
	u32 unused2;
#endif

	u64 orig_addr_limit;
	/* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
	u64 pmr_save;
	/* NOTE(review): looks like a {fp, lr} frame record for unwinding —
	 * confirm against the entry code that populates it. */
	u64 stackframe[2];

	/* Only valid for some EL1 exceptions. */
	u64 lockdep_hardirqs;
	u64 exit_rcu;
};
201 
in_syscall(struct pt_regs const * regs)202 static inline bool in_syscall(struct pt_regs const *regs)
203 {
204 	return regs->syscallno != NO_SYSCALL;
205 }
206 
/* Mark the thread as not executing a syscall (see the NO_SYSCALL comment). */
static inline void forget_syscall(struct pt_regs *regs)
{
	regs->syscallno = NO_SYSCALL;
}
211 
/* Largest offset accepted by regs_get_register() (the pstate slot). */
#define MAX_REG_OFFSET offsetof(struct pt_regs, pstate)

#define arch_has_single_step()	(1)

#ifdef CONFIG_COMPAT
/* True when the saved AArch32 state has the Thumb (T) bit set. */
#define compat_thumb_mode(regs) \
	(((regs)->pstate & PSR_AA32_T_BIT))
#else
#define compat_thumb_mode(regs) (0)
#endif

/* Exception was taken from EL0 (userspace). */
#define user_mode(regs)	\
	(((regs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t)

/* Exception was taken from 32-bit (AArch32) EL0. */
#define compat_user_mode(regs)	\
	(((regs)->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) == \
	 (PSR_MODE32_BIT | PSR_MODE_EL0t))

#define processor_mode(regs) \
	((regs)->pstate & PSR_MODE_MASK)

/* With priority masking in use, "unmasked" means the saved PMR allowed IRQs. */
#define irqs_priority_unmasked(regs)					\
	(system_uses_irq_prio_masking() ?				\
		(regs)->pmr_save == GIC_PRIO_IRQON :			\
		true)

/* IRQs enabled: PSTATE.I clear and (if in use) PMR not masking. */
#define interrupts_enabled(regs)			\
	(!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs))

#define fast_interrupts_enabled(regs) \
	(!((regs)->pstate & PSR_F_BIT))
243 
user_stack_pointer(struct pt_regs * regs)244 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
245 {
246 	if (compat_user_mode(regs))
247 		return regs->compat_sp;
248 	return regs->sp;
249 }
250 
251 extern int regs_query_register_offset(const char *name);
252 extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
253 					       unsigned int n);
254 
255 /**
256  * regs_get_register() - get register value from its offset
257  * @regs:	pt_regs from which register value is gotten
258  * @offset:	offset of the register.
259  *
260  * regs_get_register returns the value of a register whose offset from @regs.
261  * The @offset is the offset of the register in struct pt_regs.
262  * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
263  */
regs_get_register(struct pt_regs * regs,unsigned int offset)264 static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset)
265 {
266 	u64 val = 0;
267 
268 	WARN_ON(offset & 7);
269 
270 	offset >>= 3;
271 	switch (offset) {
272 	case 0 ... 30:
273 		val = regs->regs[offset];
274 		break;
275 	case offsetof(struct pt_regs, sp) >> 3:
276 		val = regs->sp;
277 		break;
278 	case offsetof(struct pt_regs, pc) >> 3:
279 		val = regs->pc;
280 		break;
281 	case offsetof(struct pt_regs, pstate) >> 3:
282 		val = regs->pstate;
283 		break;
284 	default:
285 		val = 0;
286 	}
287 
288 	return val;
289 }
290 
291 /*
292  * Read a register given an architectural register index r.
293  * This handles the common case where 31 means XZR, not SP.
294  */
pt_regs_read_reg(const struct pt_regs * regs,int r)295 static inline unsigned long pt_regs_read_reg(const struct pt_regs *regs, int r)
296 {
297 	return (r == 31) ? 0 : regs->regs[r];
298 }
299 
300 /*
301  * Write a register given an architectural register index r.
302  * This handles the common case where 31 means XZR, not SP.
303  */
pt_regs_write_reg(struct pt_regs * regs,int r,unsigned long val)304 static inline void pt_regs_write_reg(struct pt_regs *regs, int r,
305 				     unsigned long val)
306 {
307 	if (r != 31)
308 		regs->regs[r] = val;
309 }
310 
/* Valid only for Kernel mode traps. */
/* Returns the sp value saved at exception entry. */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}
316 
/* The return value lives in x0 (regs[0]). */
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->regs[0];
}
321 
/* Override the return value by writing x0 (regs[0]). */
static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
	regs->regs[0] = rc;
}
326 
/**
 * regs_get_kernel_argument() - get Nth function argument in kernel
 * @regs:	pt_regs of that context
 * @n:		function argument number (start from 0)
 *
 * Returns the @n th argument of the function call, read from the
 * argument registers x0-x7; arguments beyond the eighth are not
 * retrievable from registers and 0 is returned for them.
 *
 * Note that this chooses the most likely register mapping. In very rare
 * cases this may not return correct data, for example, if one of the
 * function parameters is 16 bytes or bigger. In such cases, we cannot
 * get access the parameter correctly and the register assignment of
 * subsequent parameters will be shifted.
 */
static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
						     unsigned int n)
{
#define NR_REG_ARGUMENTS 8
	return n < NR_REG_ARGUMENTS ? pt_regs_read_reg(regs, n) : 0;
}
348 
349 /* We must avoid circular header include via sched.h */
350 struct task_struct;
351 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
352 
/* Saved program counter for the context described by @regs. */
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return regs->pc;
}
/* Set the saved program counter for the context described by @regs. */
static inline void instruction_pointer_set(struct pt_regs *regs,
		unsigned long val)
{
	regs->pc = val;
}
362 
/* The frame pointer is x29 (regs[29]). */
static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return regs->regs[29];
}
367 
/* The procedure link register (LR) is x30 (regs[30]). */
#define procedure_link_pointer(regs)	((regs)->regs[30])

/* Set the saved link register for the context described by @regs. */
static inline void procedure_link_pointer_set(struct pt_regs *regs,
					   unsigned long val)
{
	procedure_link_pointer(regs) = val;
}
375 
376 extern unsigned long profile_pc(struct pt_regs *regs);
377 
378 #endif /* __ASSEMBLY__ */
379 #endif
380