/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * bpf_jit64.h: BPF JIT compiler for PPC64
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 */
#ifndef _BPF_JIT64_H
#define _BPF_JIT64_H

#include "bpf_jit.h"

/*
 * Stack layout:
 * Ensure the top half (up to local_tmp_var) stays consistent
 * with our redzone usage.
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 6*8		|
 *		[    tail_call_cnt	] 8		|
 *		[    local_tmp_var	] 8		|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 32/112	|
 * sp (r1) --->	[    stack pointer	] --------------
 */
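
/*
 * Illustrative sketch (an assumption, mirroring bpf_jit_comp64.c rather
 * than anything defined in this header): when the JIT decides a program
 * needs no stack frame of its own, tail_call_cnt and local_tmp_var are
 * kept in the caller's redzone below r1, which is why the top half of the
 * layout above must stay consistent with the redzone usage. An access of
 * roughly this shape is assumed:
 *
 *	PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 16));
 */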

/* for gpr non-volatile registers BPF_REG_6 to BPF_REG_10 */
#define BPF_PPC_STACK_SAVE	(6*8)
/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS	16
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
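
/*
 * Worked example: with the usual STACK_FRAME_MIN_SIZE values of 32 (ELFv2)
 * and 112 (ELFv1), matching the 32/112 frame header in the layout above:
 *
 *	ELFv2: BPF_PPC_STACKFRAME = 32  + 16 + 48 = 96
 *	ELFv1: BPF_PPC_STACKFRAME = 112 + 16 + 48 = 176
 *
 * Both are multiples of 16, so the quadword-alignment requirement holds.
 * Any eBPF stack space is allocated on top of this in the prologue.
 */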

#ifndef __ASSEMBLY__

/* BPF register usage */
#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)

/* BPF to ppc register mappings */
static const int b2p[] = {
	/* function return value */
	[BPF_REG_0] = 8,
	/* function arguments */
	[BPF_REG_1] = 3,
	[BPF_REG_2] = 4,
	[BPF_REG_3] = 5,
	[BPF_REG_4] = 6,
	[BPF_REG_5] = 7,
	/* non-volatile registers */
	[BPF_REG_6] = 27,
	[BPF_REG_7] = 28,
	[BPF_REG_8] = 29,
	[BPF_REG_9] = 30,
	/* frame pointer aka BPF_REG_10 */
	[BPF_REG_FP] = 31,
	/* eBPF jit internal registers */
	[BPF_REG_AX] = 2,
	[TMP_REG_1] = 9,
	[TMP_REG_2] = 10
};
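
/*
 * Example usage (a sketch, not taken from this header): the table is
 * consulted whenever code is emitted for a BPF register. Translating
 * BPF_MOV64_REG(BPF_REG_6, BPF_REG_1) is assumed to reduce to a plain GPR
 * move between the mapped registers (r27 <- r3):
 *
 *	EMIT(PPC_RAW_MR(b2p[BPF_REG_6], b2p[BPF_REG_1]));
 */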

/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN		27
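
/*
 * Sketch (an assumption based on the prologue in bpf_jit_comp64.c): only
 * non-volatile GPRs at or above BPF_PPC_NVR_MIN are candidates for the
 * nv gpr save area, and only the ones the program actually touches are
 * saved and restored there:
 *
 *	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
 *		if (register b2p[i] was seen)
 *			PPC_BPF_STL(b2p[i], 1, offset of its save slot);
 */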

/*
 * WARNING: These can use TMP_REG_2 if the offset is not word-aligned,
 * since ld/std are DS-form instructions and cannot encode an offset that
 * isn't a multiple of 4. Ensure TMP_REG_2 isn't already in use in that
 * case.
 */
#define PPC_BPF_LL(r, base, i) do {					      \
				if ((i) % 4) {				      \
					EMIT(PPC_RAW_LI(b2p[TMP_REG_2], (i)));\
					EMIT(PPC_RAW_LDX(r, base,	      \
							b2p[TMP_REG_2]));     \
				} else					      \
					EMIT(PPC_RAW_LD(r, base, i));	      \
				} while(0)
#define PPC_BPF_STL(r, base, i) do {					      \
				if ((i) % 4) {				      \
					EMIT(PPC_RAW_LI(b2p[TMP_REG_2], (i)));\
					EMIT(PPC_RAW_STDX(r, base,	      \
							b2p[TMP_REG_2]));     \
				} else					      \
					EMIT(PPC_RAW_STD(r, base, i));	      \
				} while(0)
#define PPC_BPF_STLU(r, base, i) do { EMIT(PPC_RAW_STDU(r, base, i)); } while(0)
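
/*
 * Example usage (a sketch, assuming the bpf_jit_stack_tailcallcnt() helper
 * from bpf_jit_comp64.c): reloading the saved tail-call count, where the
 * offset is computed at JIT time. If that offset is not a multiple of 4,
 * the macro falls back to the indexed form and clobbers TMP_REG_2:
 *
 *	PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
 */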

#define SEEN_FUNC	0x1000 /* might call external helpers */
#define SEEN_STACK	0x2000 /* uses BPF stack */
#define SEEN_TAILCALL	0x4000 /* uses tail calls */

struct codegen_context {
	/*
	 * This is used to track register usage as well
	 * as calls to external helpers.
	 * - register usage is tracked with corresponding
	 *   bits (r3-r10 and r27-r31)
	 * - rest of the bits can be used to track other
	 *   things -- for now, we use bits 12 to 14,
	 *   encoded in the SEEN_* macros above
	 */
	unsigned int seen;
	unsigned int idx;
	unsigned int stack_size;
};
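
/*
 * Sketch (an assumption, mirroring the seen-register helpers in
 * bpf_jit_comp64.c): a GPR is recorded by setting a bit derived from its
 * number, which is why the SEEN_* flags above are kept clear of the bit
 * positions used for r3-r10 and r27-r31. Something along these lines is
 * assumed:
 *
 *	ctx->seen |= 1 << (31 - b2p[BPF_REG_1]);	// r3 now marked as used
 *	...
 *	if (ctx->seen & SEEN_TAILCALL)
 *		... emit the tail-call counter setup in the prologue ...
 */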

#endif /* !__ASSEMBLY__ */

#endif /* _BPF_JIT64_H */