// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
#include <asm/text-patching.h>
#include <asm/asm-prototypes.h>

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)

#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
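
/*
 * The EMIT* helpers pack instruction bytes into a u32 little-endian, so
 * the first argument ends up as the lowest byte in memory: for example,
 * EMIT3(0x48, 0x89, 0xE5) stores the byte sequence 48 89 E5, which is
 * 'mov rbp, rsp'. Note that emit_code() writes a full u32 even for a
 * 3-byte emission; the surplus byte is simply overwritten by the next
 * EMIT, and BPF_INSN_SAFETY slack in the temp buffer covers the tail.
 */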

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64)(s32)value;
}

static bool is_uimm32(u64 value)
{
	return value == (u64)(u32)value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC)								 \
	do {										 \
		if (DST != SRC)								 \
			EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/*
 * List of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JL  0x7C
#define X86_JGE 0x7D
#define X86_JLE 0x7E
#define X86_JG  0x7F

/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
#define X86_REG_R9 (MAX_BPF_JIT_REG + 2)

/*
 * The following table maps BPF registers to x86-64 registers.
 *
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 *
 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
 * trampoline. x86-64 register R10 is used for blinding (if enabled).
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
	[X86_REG_R9] = 1, /* R9 register, 6th function argument */
};
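
/*
 * reg2hex[] holds only the low 3 bits of the x86-64 register number;
 * the fourth bit for r8..r15 is supplied separately via the REX prefix
 * (see is_ereg()/add_1mod()/add_2mod() below). E.g. BPF_REG_7 maps to
 * R13: register number 13 is 0b1101, so reg2hex stores 5 (0b101) and
 * the REX bit contributes the remaining 0b1000.
 */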

static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
};

/*
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9) |
			     BIT(X86_REG_R9) |
			     BIT(BPF_REG_AX));
}

/*
 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
 * of encoding. al,cl,dl,bl have simpler encoding.
 */
static bool is_ereg_8l(u32 reg)
{
	return is_ereg(reg) ||
	    (1 << reg) & (BIT(BPF_REG_1) |
			  BIT(BPF_REG_2) |
			  BIT(BPF_REG_FP));
}

static bool is_axreg(u32 reg)
{
	return reg == BPF_REG_0;
}

/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
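
/*
 * These helpers build a ModRM byte: mod in bits 7-6, reg in bits 5-3,
 * rm in bits 2-0. add_2reg(0xC0, dst, src) therefore encodes a
 * register-direct (mod == 11) operation with 'dst' in the rm field and
 * 'src' in the reg field, matching how the emitters below pass dst/src.
 */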

/* Some 1-byte opcodes for binary ALU operations */
static u8 simple_alu_opcodes[] = {
	[BPF_ADD] = 0x01,
	[BPF_SUB] = 0x29,
	[BPF_AND] = 0x21,
	[BPF_OR] = 0x09,
	[BPF_XOR] = 0x31,
	[BPF_LSH] = 0xE0,
	[BPF_RSH] = 0xE8,
	[BPF_ARSH] = 0xF8,
};

static void jit_fill_hole(void *area, unsigned int size)
{
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
}

struct jit_context {
	int cleanup_addr; /* Epilogue code offset */
};

/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE		5
/* Number of bytes that will be skipped on tailcall */
#define X86_TAIL_CALL_OFFSET	11
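
/*
 * X86_TAIL_CALL_OFFSET covers the fixed-size start of the prologue laid
 * down by emit_prologue() for an eBPF program: 5 bytes of patchable
 * nop, 2 bytes of 'xor eax, eax' (or nop2), 1 byte 'push rbp' and
 * 3 bytes 'mov rbp, rsp' -- 11 bytes in total. A tail call jumps past
 * these so that the tail call counter in rax is not re-zeroed and the
 * already set-up frame pointer is kept.
 */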

static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[0])
		EMIT1(0x53);         /* push rbx */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x55);   /* push r13 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x56);   /* push r14 */
	if (callee_regs_used[3])
		EMIT2(0x41, 0x57);   /* push r15 */
	*pprog = prog;
}

static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
	u8 *prog = *pprog;

	if (callee_regs_used[3])
		EMIT2(0x41, 0x5F);   /* pop r15 */
	if (callee_regs_used[2])
		EMIT2(0x41, 0x5E);   /* pop r14 */
	if (callee_regs_used[1])
		EMIT2(0x41, 0x5D);   /* pop r13 */
	if (callee_regs_used[0])
		EMIT1(0x5B);         /* pop rbx */
	*pprog = prog;
}

/*
 * Emit x86-64 prologue code for BPF program.
 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
 * while jumping to another program
 */
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
			  bool tail_call_reachable, bool is_subprog)
{
	u8 *prog = *pprog;

	/* BPF trampoline can be made to work without these nops,
	 * but let's waste 5 bytes for now and optimize later
	 */
	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	if (!ebpf_from_cbpf) {
		if (tail_call_reachable && !is_subprog)
			EMIT2(0x31, 0xC0); /* xor eax, eax */
		else
			EMIT2(0x66, 0x90); /* nop2 */
	}
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	/* sub rsp, rounded_stack_depth */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	if (tail_call_reachable)
		EMIT1(0x50);         /* push rax */
	*pprog = prog;
}

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
		return -ERANGE;
	}
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}
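
/*
 * emit_patch() generates the 5-byte rel32 form of call/jmp (one opcode
 * byte plus a 32-bit displacement). The displacement is relative to the
 * first byte *after* the instruction, hence the
 * 'func - (ip + X86_PATCH_SIZE)' computation above.
 */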

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8);
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9);
}

static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
				void *old_addr, void *new_addr,
				const bool text_live)
{
	const u8 *nop_insn = x86_nops[5];
	u8 old_insn[X86_PATCH_SIZE];
	u8 new_insn[X86_PATCH_SIZE];
	u8 *prog;
	int ret;

	memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
	if (old_addr) {
		prog = old_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, old_addr, ip) :
		      emit_jump(&prog, old_addr, ip);
		if (ret)
			return ret;
	}

	memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
	if (new_addr) {
		prog = new_insn;
		ret = t == BPF_MOD_CALL ?
		      emit_call(&prog, new_addr, ip) :
		      emit_jump(&prog, new_addr, ip);
		if (ret)
			return ret;
	}

	ret = -EBUSY;
	mutex_lock(&text_mutex);
	if (memcmp(ip, old_insn, X86_PATCH_SIZE))
		goto out;
	ret = 1;
	if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
		if (text_live)
			text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
		else
			memcpy(ip, new_insn, X86_PATCH_SIZE);
		ret = 0;
	}
out:
	mutex_unlock(&text_mutex);
	return ret;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
	if (!is_kernel_text((long)ip) &&
	    !is_bpf_text_address((long)ip))
		/* BPF poking in modules is not supported */
		return -EINVAL;

	return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}

static int get_pop_bytes(bool *callee_regs_used)
{
	int bytes = 0;

	if (callee_regs_used[3])
		bytes += 2;
	if (callee_regs_used[2])
		bytes += 2;
	if (callee_regs_used[1])
		bytes += 2;
	if (callee_regs_used[0])
		bytes += 1;

	return bytes;
}

/*
 * Generate the following code:
 *
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
					u32 stack_depth)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog;
	int pop_bytes = 0;
	int off1 = 42;
	int off2 = 31;
	int off3 = 9;

	/* count the additional bytes used for popping callee regs from stack
	 * that need to be taken into account for each of the offsets that
	 * are used for bailing out of the tail call
	 */
	pop_bytes = get_pop_bytes(callee_regs_used);
	off1 += pop_bytes;
	off2 += pop_bytes;
	off3 += pop_bytes;

	if (stack_depth) {
		off1 += 7;
		off2 += 7;
		off3 += 7;
	}
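
	/*
	 * From here on, off1/off2/off3 must equal the exact number of
	 * bytes between each conditional jump below and the out: label,
	 * i.e. the sizes of the instructions emitted in between; any
	 * change to the emitted sequence has to be reflected in these
	 * constants.
	 */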

	/*
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);         /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);         /* mov dword ptr [rbp - tcc_off], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6,       /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	EMIT3(0x48, 0x85, 0xC9);                  /* test rcx,rcx */
#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);                   /* je out */

	*pprog = prog;
	pop_callee_regs(pprog, callee_regs_used);
	prog = *pprog;

	EMIT1(0x58);                              /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4,     /* add rsp, sd */
			    round_up(stack_depth, 8));

	/* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
	EMIT4(0x48, 0x8B, 0x49,                   /* mov rcx, qword ptr [rcx + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC1,                   /* add rcx, X86_TAIL_CALL_OFFSET */
	      X86_TAIL_CALL_OFFSET);
	/*
	 * Now we're ready to jump into next BPF program
	 * rdi == ctx (1st arg)
	 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
	 */
	RETPOLINE_RCX_BPF_JIT();

	/* out: */
	*pprog = prog;
}

static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
				      u8 **pprog, int addr, u8 *image,
				      bool *callee_regs_used, u32 stack_depth)
{
	int tcc_off = -4 - round_up(stack_depth, 8);
	u8 *prog = *pprog;
	int pop_bytes = 0;
	int off1 = 20;
	int poke_off;

	/* count the additional bytes used for popping callee regs from stack
	 * that need to be taken into account for the jump offset that is used
	 * for bailing out of the tail call when the limit is reached
	 */
	pop_bytes = get_pop_bytes(callee_regs_used);
	off1 += pop_bytes;

	/*
	 * total bytes for:
	 * - nop5/ jmpq $off
	 * - pop callee regs
	 * - add rsp, $val if depth > 0
	 * - pop rax
	 */
	poke_off = X86_PATCH_SIZE + pop_bytes + 1;
	if (stack_depth) {
		poke_off += 7;
		off1 += 7;
	}

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	EMIT2_off32(0x8B, 0x85, tcc_off);             /* mov eax, dword ptr [rbp - tcc_off] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);         /* cmp eax, MAX_TAIL_CALL_CNT */
	EMIT2(X86_JA, off1);                          /* ja out */
	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, tcc_off);             /* mov dword ptr [rbp - tcc_off], eax */

	poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
	poke->adj_off = X86_TAIL_CALL_OFFSET;
	poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
	poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;

	emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
		  poke->tailcall_bypass);

	*pprog = prog;
	pop_callee_regs(pprog, callee_regs_used);
	prog = *pprog;
	EMIT1(0x58);                                  /* pop rax */
	if (stack_depth)
		EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));

	memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
	prog += X86_PATCH_SIZE;
	/* out: */

	*pprog = prog;
}

static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
{
	struct bpf_jit_poke_descriptor *poke;
	struct bpf_array *array;
	struct bpf_prog *target;
	int i, ret;

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		if (poke->aux && poke->aux != prog->aux)
			continue;

		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;

		array = container_of(poke->tail_call.map, struct bpf_array, map);
		mutex_lock(&array->aux->poke_mutex);
		target = array->ptrs[poke->tail_call.key];
		if (target) {
			/* Plain memcpy is used when image is not live yet
			 * and still not locked as read-only. Once poke
			 * location is active (poke->tailcall_target_stable),
			 * any parallel bpf_arch_text_poke() might still
			 * occur on the read-write image until we finally
			 * lock it as read-only. Both modifications of
			 * the given image are under text_mutex to avoid
			 * interference.
			 */
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off, false);
			BUG_ON(ret < 0);
			ret = __bpf_arch_text_poke(poke->tailcall_bypass,
						   BPF_MOD_JUMP,
						   (u8 *)poke->tailcall_target +
						   X86_PATCH_SIZE, NULL, false);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}

static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
{
	u8 *prog = *pprog;
	u8 b1, b2, b3;

	/*
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	 */
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		b2 = 0xC7;
		b3 = 0xC0;
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
		goto done;
	}

	/*
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	 * to save 3 bytes.
	 */
	if (imm32 == 0) {
		if (is_ereg(dst_reg))
			EMIT1(add_2mod(0x40, dst_reg, dst_reg));
		b2 = 0x31; /* xor */
		b3 = 0xC0;
		EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
		goto done;
	}

	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
done:
	*pprog = prog;
}
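
/*
 * Sizes of the three forms above for a non-extended register: 'xor
 * eax, eax' is 2 bytes, 'mov eax, imm32' is 5 bytes and the
 * sign-extending 'mov rax, imm32' (REX.W + C7 /0) is 7 bytes, which is
 * where the "save 2/3 bytes" remarks in the comments come from.
 */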

static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
{
	u8 *prog = *pprog;

	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		/*
		 * For emitting plain u32, where sign bit must not be
		 * propagated, LLVM tends to load imm64 over mov32
		 * directly, so save a couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
	}

	*pprog = prog;
}

static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
	u8 *prog = *pprog;

	if (is64) {
		/* mov dst, src */
		EMIT_mov(dst_reg, src_reg);
	} else {
		/* mov32 dst, src */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
	}

	*pprog = prog;
}

/* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
	u8 *prog = *pprog;

	if (is_imm8(off)) {
		/* 1-byte signed displacement.
		 *
		 * If off == 0 we could skip this and save one extra byte, but
		 * the special case of x86 R13, which always needs an offset,
		 * is not worth the hassle
		 */
		EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
	} else {
		/* 4-byte signed displacement */
		EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
	}
	*pprog = prog;
}
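
/*
 * The ModRM mod bits select the displacement width here: 0x40
 * (mod == 01) takes a disp8, 0x80 (mod == 10) a disp32. Both are
 * applied on top of the base register in ptr_reg, with val_reg going
 * into the reg field.
 */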

/*
 * Emit a REX byte if it will be necessary to address these registers
 */
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
	u8 *prog = *pprog;

	if (is64)
		EMIT1(add_2mod(0x48, dst_reg, src_reg));
	else if (is_ereg(dst_reg) || is_ereg(src_reg))
		EMIT1(add_2mod(0x40, dst_reg, src_reg));
	*pprog = prog;
}

/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		break;
	case BPF_H:
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		break;
	case BPF_W:
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		else
			EMIT1(0x8B);
		break;
	case BPF_DW:
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
		break;
	}
	emit_insn_suffix(&prog, src_reg, dst_reg, off);
	*pprog = prog;
}

/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
	u8 *prog = *pprog;

	switch (size) {
	case BPF_B:
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		else
			EMIT1(0x88);
		break;
	case BPF_H:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT2(0x66, 0x89);
		break;
	case BPF_W:
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		else
			EMIT1(0x89);
		break;
	case BPF_DW:
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		break;
	}
	emit_insn_suffix(&prog, dst_reg, src_reg, off);
	*pprog = prog;
}

static int emit_atomic(u8 **pprog, u8 atomic_op,
		       u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
	u8 *prog = *pprog;

	EMIT1(0xF0); /* lock prefix */

	maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);

	/* emit opcode */
	switch (atomic_op) {
	case BPF_ADD:
	case BPF_SUB:
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
		EMIT1(simple_alu_opcodes[atomic_op]);
		break;
	case BPF_ADD | BPF_FETCH:
		/* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
		EMIT2(0x0F, 0xC1);
		break;
	case BPF_XCHG:
		/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
		EMIT1(0x87);
		break;
	case BPF_CMPXCHG:
		/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
		EMIT2(0x0F, 0xB1);
		break;
	default:
		pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
		return -EFAULT;
	}

	emit_insn_suffix(&prog, dst_reg, src_reg, off);

	*pprog = prog;
	return 0;
}
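
/*
 * Example encoding: BPF_STX | BPF_ATOMIC | BPF_DW with imm == BPF_ADD
 * and an s8 offset becomes 'lock add qword ptr [dst + off], src':
 * F0 (lock), REX.W, 01 (add r/m64, r64), then ModRM + disp8 from
 * emit_insn_suffix().
 */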

static bool ex_handler_bpf(const struct exception_table_entry *x,
			   struct pt_regs *regs, int trapnr,
			   unsigned long error_code, unsigned long fault_addr)
{
	u32 reg = x->fixup >> 8;

	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
	return true;
}

static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
			     bool *regs_used, bool *tail_call_seen)
{
	int i;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
			*tail_call_seen = true;
		if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
			regs_used[0] = true;
		if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
			regs_used[1] = true;
		if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
			regs_used[2] = true;
		if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
			regs_used[3] = true;
	}
}

static void emit_nops(u8 **pprog, int len)
{
	u8 *prog = *pprog;
	int i, noplen;

	while (len > 0) {
		noplen = len;

		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;

		for (i = 0; i < noplen; i++)
			EMIT1(x86_nops[noplen][i]);
		len -= noplen;
	}

	*pprog = prog;
}

#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
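
/*
 * INSN_SZ_DIFF is the size this BPF insn had in the previous JIT pass
 * (addrs[i] - addrs[i - 1]) minus its size in the current pass
 * (prog - temp). It is used below to pad shrunken jumps with nops so
 * that insn sizes can only converge across passes instead of
 * oscillating.
 */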

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
	bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
	struct bpf_insn *insn = bpf_prog->insnsi;
	bool callee_regs_used[4] = {};
	int insn_cnt = bpf_prog->len;
	bool tail_call_seen = false;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, excnt = 0;
	int ilen, proglen = 0;
	u8 *prog = temp;
	int err;

	detect_reg_usage(insn, insn_cnt, callee_regs_used,
			 &tail_call_seen);

	/* tail call's presence in current prog implies it is reachable */
	tail_call_reachable |= tail_call_seen;

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog), tail_call_reachable,
		      bpf_prog->aux->func_idx != 0);
	push_callee_regs(&prog, callee_regs_used);

	ilen = prog - temp;
	if (image)
		memcpy(image + proglen, temp, ilen);
	proglen += ilen;
	addrs[0] = proglen;
	prog = temp;

	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b2 = 0, b3 = 0;
		u8 *start_of_ldx;
		s64 jmp_offset;
		u8 jmp_cond;
		u8 *func;
		int nops;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_ALU64);
			b2 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
			emit_mov_reg(&prog,
				     BPF_CLASS(insn->code) == BPF_ALU64,
				     dst_reg, src_reg);
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			/*
			 * b3 holds 'normal' opcode, b2 short form only valid
			 * in case dst is eax/rax.
			 */
			switch (BPF_OP(insn->code)) {
			case BPF_ADD:
				b3 = 0xC0;
				b2 = 0x05;
				break;
			case BPF_SUB:
				b3 = 0xE8;
				b2 = 0x2D;
				break;
			case BPF_AND:
				b3 = 0xE0;
				b2 = 0x25;
				break;
			case BPF_OR:
				b3 = 0xC8;
				b2 = 0x0D;
				break;
			case BPF_XOR:
				b3 = 0xF0;
				b2 = 0x35;
				break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
				       dst_reg, imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			insn++;
			i++;
			break;

			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

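			/*
			 * x86 div implicitly divides rdx:rax (edx:eax for the
			 * 32-bit form) by its operand, leaving the quotient
			 * in rax and the remainder in rdx. That is why rax
			 * and rdx are saved around the operation and edx is
			 * zeroed below before the divide.
			 */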
			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/*
			 * xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
		{
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;

			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
			else
				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);

			if (is64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0) {
				/* mov dst_reg, rax */
				EMIT_mov(dst_reg, BPF_REG_0);
				EMIT1(0x58); /* pop rax */
			}
			break;
		}
			/* Shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			if (imm32 == 1)
				EMIT2(0xD1, add_1reg(b3, dst_reg));
			else
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* Check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			b3 = simple_alu_opcodes[BPF_OP(insn->code)];
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/*
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* speculation barrier */
		case BPF_ST | BPF_NOSPEC:
			if (boot_cpu_has(X86_FEATURE_XMM2))
				/* Emit 'lfence' */
				EMIT3(0x0F, 0xAE, 0xE8);
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
		case BPF_STX | BPF_MEM | BPF_H:
		case BPF_STX | BPF_MEM | BPF_W:
		case BPF_STX | BPF_MEM | BPF_DW:
			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				/* test src_reg, src_reg */
				maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */
				EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
				/* jne start_of_ldx */
				EMIT2(X86_JNE, 0);
				/* xor dst_reg, dst_reg */
				emit_mov_imm32(&prog, false, dst_reg, 0);
				/* jmp byte_after_ldx */
				EMIT2(0xEB, 0);

				/* populate jmp_offset for JNE above */
				temp[4] = prog - temp - 5 /* sizeof(test + jne) */;
				start_of_ldx = prog;
			}
			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen + (start_of_ldx - temp);
				s64 delta;

				/* populate jmp_offset for JMP above */
				start_of_ldx[-1] = prog - start_of_ldx;

				if (!bpf_prog->aux->extable)
					break;

				if (excnt >= bpf_prog->aux->num_exentries) {
					pr_err("ex gen bug\n");
					return -EFAULT;
				}
				ex = &bpf_prog->aux->extable[excnt++];

				delta = _insn - (u8 *)&ex->insn;
				if (!is_simm32(delta)) {
					pr_err("extable->insn doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->insn = delta;

				delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
				if (!is_simm32(delta)) {
					pr_err("extable->handler doesn't fit into 32-bit\n");
					return -EFAULT;
				}
				ex->handler = delta;

				if (dst_reg > BPF_REG_9) {
					pr_err("verifier error\n");
					return -EFAULT;
				}
				/*
				 * Compute size of x86 insn and its target dest x86 register.
				 * ex_handler_bpf() will use lower 8 bits to adjust
				 * pt_regs->ip to jump over this x86 instruction
				 * and upper bits to figure out which pt_regs to zero out.
				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
				 * of 4 bytes will be ignored and rbx will be zero inited.
				 */
				ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
			}
			break;

		case BPF_STX | BPF_ATOMIC | BPF_W:
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			if (insn->imm == (BPF_AND | BPF_FETCH) ||
			    insn->imm == (BPF_OR | BPF_FETCH) ||
			    insn->imm == (BPF_XOR | BPF_FETCH)) {
				bool is64 = BPF_SIZE(insn->code) == BPF_DW;
				u32 real_src_reg = src_reg;
				u32 real_dst_reg = dst_reg;
				u8 *branch_target;

				/*
				 * Can't be implemented with a single x86 insn.
				 * Need to do a CMPXCHG loop.
				 */

				/* Will need RAX as a CMPXCHG operand so save R0 */
				emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
				if (src_reg == BPF_REG_0)
					real_src_reg = BPF_REG_AX;
				if (dst_reg == BPF_REG_0)
					real_dst_reg = BPF_REG_AX;

				branch_target = prog;
				/* Load old value */
				emit_ldx(&prog, BPF_SIZE(insn->code),
					 BPF_REG_0, real_dst_reg, insn->off);
				/*
				 * Perform the (commutative) operation locally,
				 * put the result in the AUX_REG.
				 */
				emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
				maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
				EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
				      add_2reg(0xC0, AUX_REG, real_src_reg));
				/* Attempt to swap in new value */
				err = emit_atomic(&prog, BPF_CMPXCHG,
						  real_dst_reg, AUX_REG,
						  insn->off,
						  BPF_SIZE(insn->code));
				if (WARN_ON(err))
					return err;
				/*
				 * ZF tells us whether we won the race. If it's
				 * cleared we need to try again.
				 */
				EMIT2(X86_JNE, -(prog - branch_target) - 2);
				/* Return the pre-modification value */
				emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
				/* Restore R0 after clobbering RAX */
				emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
				break;
			}

			err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
					  insn->off, BPF_SIZE(insn->code));
			if (err)
				return err;
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			if (tail_call_reachable) {
				EMIT3_off32(0x48, 0x8B, 0x85,
					    -(bpf_prog->aux->stack_depth + 8));
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
					return -EINVAL;
			} else {
				if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
					return -EINVAL;
			}
			break;

		case BPF_JMP | BPF_TAIL_CALL:
			if (imm32)
				emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
							  &prog, addrs[i], image,
							  callee_regs_used,
							  bpf_prog->aux->stack_depth);
			else
				emit_bpf_tail_call_indirect(&prog,
							    callee_regs_used,
							    bpf_prog->aux->stack_depth);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			maybe_emit_mod(&prog, dst_reg, src_reg,
				       BPF_CLASS(insn->code) == BPF_JMP);
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* test dst_reg, dst_reg to save one extra byte */
			if (imm32 == 0) {
				maybe_emit_mod(&prog, dst_reg, dst_reg,
					       BPF_CLASS(insn->code) == BPF_JMP);
				EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
				goto emit_cond_jmp;
			}

			/* cmp dst_reg, imm8/32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JLT:
				/* LT is unsigned '<', JB in x86 */
				jmp_cond = X86_JB;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JLE:
				/* LE is unsigned '<=', JBE in x86 */
				jmp_cond = X86_JBE;
				break;
			case BPF_JSGT:
				/* Signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSLT:
				/* Signed '<', LT in x86 */
				jmp_cond = X86_JL;
				break;
			case BPF_JSGE:
				/* Signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			case BPF_JSLE:
				/* Signed '<=', LE in x86 */
				jmp_cond = X86_JLE;
				break;
			default: /* to silence GCC warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				if (jmp_padding) {
					/* To keep the jmp_offset valid, the extra bytes are
					 * padded before the jump insn, so we subtract the
					 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
					 *
					 * If the previous pass already emits an imm8
					 * jmp_cond, then this BPF insn won't shrink, so
					 * "nops" is 0.
					 *
					 * On the other hand, if the previous pass emits an
					 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
					 * keep the image from shrinking further.
					 *
					 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
					 *     is 2 bytes, so the size difference is 4 bytes.
					 */
					nops = INSN_SZ_DIFF - 2;
					if (nops != 0 && nops != 4) {
						pr_err("unexpected jmp_cond padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, nops);
				}
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				 * dead code.
				 */
				jmp_offset = -2;
			else
				jmp_offset = addrs[i + insn->off] - addrs[i];

			if (!jmp_offset) {
				/*
				 * If jmp_padding is enabled, the extra nops will
				 * be inserted. Otherwise, optimize out nop jumps.
				 */
				if (jmp_padding) {
					/* There are 3 possible conditions.
					 * (1) This BPF_JA is already optimized out in
					 *     the previous run, so there is no need
					 *     to pad any extra byte (0 byte).
					 * (2) The previous pass emits an imm8 jmp,
					 *     so we pad 2 bytes to match the previous
					 *     insn size.
					 * (3) Similarly, the previous pass emits an
					 *     imm32 jmp, and 5 bytes is padded.
					 */
					nops = INSN_SZ_DIFF;
					if (nops != 0 && nops != 2 && nops != 5) {
						pr_err("unexpected nop jump padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, nops);
				}
				break;
			}
emit_jmp:
			if (is_imm8(jmp_offset)) {
				if (jmp_padding) {
					/* To avoid breaking jmp_offset, the extra bytes
					 * are padded before the actual jmp insn, so
					 * 2 bytes is subtracted from INSN_SZ_DIFF.
					 *
					 * If the previous pass already emits an imm8
					 * jmp, there is nothing to pad (0 byte).
					 *
					 * If it emits an imm32 jmp (5 bytes) previously
					 * and now an imm8 jmp (2 bytes), then we pad
					 * (5 - 2 = 3) bytes to stop the image from
					 * shrinking further.
					 */
					nops = INSN_SZ_DIFF - 2;
					if (nops != 0 && nops != 3) {
						pr_err("unexpected jump padding: %d bytes\n",
						       nops);
						return -EFAULT;
					}
					emit_nops(&prog, INSN_SZ_DIFF - 2);
				}
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			pop_callee_regs(&prog, callee_regs_used);
			EMIT1(0xC9);         /* leave */
			EMIT1(0xC3);         /* ret */
			break;

		default:
			/*
			 * By design x86-64 JIT should support all BPF instructions.
			 * This error will be seen if new instruction was added
			 * to the interpreter, but not to the JIT, or if there is
			 * junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			/*
			 * When populating the image, assert that:
			 *
			 *  i) We do not write beyond the allocated space, and
			 * ii) addrs[i] did not change from the prior run, in order
			 *     to validate assumptions made for computing branch
			 *     displacements.
			 */
			if (unlikely(proglen + ilen > oldproglen ||
				     proglen + ilen != addrs[i])) {
				pr_err("bpf_jit: fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}

	if (image && excnt != bpf_prog->aux->num_exentries) {
		pr_err("extable is not populated\n");
		return -EFAULT;
	}
	return proglen;
}

static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
		      int stack_size)
{
	int i;
	/* Store function arguments to stack.
	 * For a function that accepts two pointers the sequence will be:
	 * mov QWORD PTR [rbp-0x10],rdi
	 * mov QWORD PTR [rbp-0x8],rsi
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 BPF_REG_FP,
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 -(stack_size - i * 8));
}

static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
			 int stack_size)
{
	int i;

	/* Restore function arguments from stack.
	 * For a function that accepts two pointers the sequence will be:
	 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
	 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
	 */
	for (i = 0; i < min(nr_args, 6); i++)
		emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
			 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
			 BPF_REG_FP,
			 -(stack_size - i * 8));
}

static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
			   struct bpf_prog *p, int stack_size, bool save_ret)
{
	u8 *prog = *pprog;
	u8 *jmp_insn;

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	if (emit_call(&prog,
		      p->aux->sleepable ? __bpf_prog_enter_sleepable :
		      __bpf_prog_enter, prog))
		return -EINVAL;
	/* remember prog start time returned by __bpf_prog_enter */
	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);

	/* if (__bpf_prog_enter*(prog) == 0)
	 *	goto skip_exec_of_prog;
	 */
	EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
	/* emit 2 nops that will be replaced with JE insn */
	jmp_insn = prog;
	emit_nops(&prog, 2);

	/* arg1: lea rdi, [rbp - stack_size] */
	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
	/* arg2: progs[i]->insnsi for interpreter */
	if (!p->jited)
		emit_mov_imm64(&prog, BPF_REG_2,
			       (long) p->insnsi >> 32,
			       (u32) (long) p->insnsi);
	/* call JITed bpf program or interpreter */
	if (emit_call(&prog, p->bpf_func, prog))
		return -EINVAL;

	/*
	 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
	 * value of the previous call which is then passed on the stack
	 * to the next BPF program.
	 *
	 * BPF_TRAMP_FENTRY trampoline may need to return the return
	 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
	 */
	if (save_ret)
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);

	/* replace 2 nops with JE insn, since jmp target is known */
	jmp_insn[0] = X86_JE;
	jmp_insn[1] = prog - jmp_insn - 2;

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: mov rsi, rbx <- start time in nsec */
	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
	if (emit_call(&prog,
		      p->aux->sleepable ? __bpf_prog_exit_sleepable :
		      __bpf_prog_exit, prog))
		return -EINVAL;

	*pprog = prog;
	return 0;
}

static void emit_align(u8 **pprog, u32 align)
{
	u8 *target, *prog = *pprog;

	target = PTR_ALIGN(prog, align);
	if (target != prog)
		emit_nops(&prog, target - prog);

	*pprog = prog;
}

static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
	u8 *prog = *pprog;
	s64 offset;

	offset = func - (ip + 2 + 4);
	if (!is_simm32(offset)) {
		pr_err("Target %p is out of range\n", func);
		return -EINVAL;
	}
	EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
	*pprog = prog;
	return 0;
}
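
/*
 * 0x0F, jmp_cond + 0x10 is the 6-byte near form of the Jcc opcodes
 * listed near the top of this file; the offset is relative to the end
 * of the instruction, hence 'func - (ip + 2 + 4)'.
 */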
1836
invoke_bpf(const struct btf_func_model * m,u8 ** pprog,struct bpf_tramp_progs * tp,int stack_size,bool save_ret)1837 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
1838 struct bpf_tramp_progs *tp, int stack_size,
1839 bool save_ret)
1840 {
1841 int i;
1842 u8 *prog = *pprog;
1843
1844 for (i = 0; i < tp->nr_progs; i++) {
1845 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
1846 save_ret))
1847 return -EINVAL;
1848 }
1849 *pprog = prog;
1850 return 0;
1851 }
1852
invoke_bpf_mod_ret(const struct btf_func_model * m,u8 ** pprog,struct bpf_tramp_progs * tp,int stack_size,u8 ** branches)1853 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
1854 struct bpf_tramp_progs *tp, int stack_size,
1855 u8 **branches)
1856 {
1857 u8 *prog = *pprog;
1858 int i;
1859
1860 /* The first fmod_ret program will receive a garbage return value.
1861 * Set this to 0 to avoid confusing the program.
1862 */
1863 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1864 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1865 for (i = 0; i < tp->nr_progs; i++) {
1866 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
1867 return -EINVAL;
1868
1869 /* mod_ret prog stored return value into [rbp - 8]. Emit:
1870 * if (*(u64 *)(rbp - 8) != 0)
1871 * goto do_fexit;
1872 */
1873 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
1874 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
1875
1876 /* Save the location of the branch and Generate 6 nops
1877 * (4 bytes for an offset and 2 bytes for the jump) These nops
1878 * are replaced with a conditional jump once do_fexit (i.e. the
1879 * start of the fexit invocation) is finalized.
1880 */
1881 branches[i] = prog;
1882 emit_nops(&prog, 4 + 2);
1883 }
1884
1885 *pprog = prog;
1886 return 0;
1887 }

static bool is_valid_bpf_tramp_flags(unsigned int flags)
{
	if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
	    (flags & BPF_TRAMP_F_SKIP_FRAME))
		return false;

	/*
	 * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
	 * and it must be used alone.
	 */
	if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
	    (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
		return false;

	return true;
}
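
/*
 * For example, BPF_TRAMP_F_RESTORE_REGS | BPF_TRAMP_F_SKIP_FRAME is
 * rejected above: restoring the argument registers is only meaningful
 * when the trampoline returns into the traced function's body, which
 * BPF_TRAMP_F_SKIP_FRAME skips.
 */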

/* Example:
 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 * its 'struct btf_func_model' will have nr_args=2
 * The assembly code when eth_type_trans is executing after trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 16			// space for skb and dev
 * push rbx			// temp regs to pass start time
 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
 * mov qword ptr [rbp - 8], rsi	// save dev pointer to stack
 * call __bpf_prog_enter	// rcu_read_lock and preempt_disable
 * mov rbx, rax			// remember start time if bpf stats are enabled
 * lea rdi, [rbp - 16]		// R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog	// unused if bpf stats are off
 * mov rsi, rbx			// prog start time
 * call __bpf_prog_exit		// rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 8]	// restore dev pointer from stack
 * pop rbx
 * leave
 * ret
 *
 * eth_type_trans has a 5 byte nop at the beginning. These 5 bytes will be
 * replaced with 'call generated_bpf_trampoline'. When it returns
 * eth_type_trans will continue executing with original skb and dev pointers.
 *
 * The assembly code when eth_type_trans is called from trampoline:
 *
 * push rbp
 * mov rbp, rsp
 * sub rsp, 24			// space for skb, dev, return value
 * push rbx			// temp regs to pass start time
 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
 * call __bpf_prog_enter	// rcu_read_lock and preempt_disable
 * mov rbx, rax			// remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]		// R1==ctx of bpf prog
 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog	// unused if bpf stats are off
 * mov rsi, rbx			// prog start time
 * call __bpf_prog_exit		// rcu_read_unlock, preempt_enable and stats math
 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
 * call eth_type_trans+5	// execute body of eth_type_trans
 * mov qword ptr [rbp - 8], rax // save return value
 * call __bpf_prog_enter	// rcu_read_lock and preempt_disable
 * mov rbx, rax			// remember start time if bpf stats are enabled
 * lea rdi, [rbp - 24]		// R1==ctx of bpf prog
 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
 * movabsq rdi, 64bit_addr_of_struct_bpf_prog	// unused if bpf stats are off
 * mov rsi, rbx			// prog start time
 * call __bpf_prog_exit		// rcu_read_unlock, preempt_enable and stats math
 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
 * pop rbx
 * leave
 * add rsp, 8			// skip eth_type_trans's frame
 * ret				// return to its caller
 */
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call)
{
	int ret, i, nr_args = m->nr_args;
	int stack_size = nr_args * 8;
	struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
	struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
	struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
	u8 **branches = NULL;
	u8 *prog;
	bool save_ret;

	/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
	if (nr_args > 6)
		return -ENOTSUPP;

	if (!is_valid_bpf_tramp_flags(flags))
		return -EINVAL;

	/* room for return value of orig_call or fentry prog */
	save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
	if (save_ret)
		stack_size += 8;

	if (flags & BPF_TRAMP_F_IP_ARG)
		stack_size += 8; /* room for IP address argument */
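
	/* A sketch of the resulting layout below the saved rbp, assuming
	 * nr_args == 2 with save_ret and BPF_TRAMP_F_IP_ARG both set
	 * (stack_size == 32 at this point; see save_regs() and the
	 * IP_ARG handling below):
	 *
	 *	[rbp - 8]	return value of orig_call or fentry prog
	 *	[rbp - 16]	arg2
	 *	[rbp - 24]	arg1
	 *	[rbp - 32]	IP of the traced function
	 */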

	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip patched call instruction and point orig_call to actual
		 * body of the kernel function.
		 */
		orig_call += X86_PATCH_SIZE;

	prog = image;

	EMIT1(0x55);		 /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
	EMIT1(0x53);		 /* push rbx */

	if (flags & BPF_TRAMP_F_IP_ARG) {
		/* Store IP address of the traced function:
		 * mov rax, QWORD PTR [rbp + 8]
		 * sub rax, X86_PATCH_SIZE
		 * mov QWORD PTR [rbp - stack_size], rax
		 */
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
		EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -stack_size);

		/* Continue with stack_size for regs storage, stack will
		 * be correctly restored with 'leave' instruction.
		 */
		stack_size -= 8;
	}

	save_regs(m, &prog, nr_args, stack_size);

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_call(&prog, __bpf_tramp_enter, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (fentry->nr_progs)
		if (invoke_bpf(m, &prog, fentry, stack_size,
			       flags & BPF_TRAMP_F_RET_FENTRY_RET))
			return -EINVAL;

	if (fmod_ret->nr_progs) {
		branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
				   GFP_KERNEL);
		if (!branches)
			return -ENOMEM;

		if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
				       branches)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}

	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		restore_regs(m, &prog, nr_args, stack_size);

		/* call original function */
		if (emit_call(&prog, orig_call, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
		/* remember return value on the stack for the bpf prog to access */
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
		im->ip_after_call = prog;
		memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
		prog += X86_PATCH_SIZE;
	}

	if (fmod_ret->nr_progs) {
		/* From Intel 64 and IA-32 Architectures Optimization
		 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
		 * Coding Rule 11: All branch targets should be 16-byte
		 * aligned.
		 */
		emit_align(&prog, 16);
		/* Update the branches saved in invoke_bpf_mod_ret with the
		 * aligned address of do_fexit.
		 */
		for (i = 0; i < fmod_ret->nr_progs; i++)
			emit_cond_near_jump(&branches[i], prog, branches[i],
					    X86_JNE);
	}

	if (fexit->nr_progs)
		if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
			ret = -EINVAL;
			goto cleanup;
		}

	if (flags & BPF_TRAMP_F_RESTORE_REGS)
		restore_regs(m, &prog, nr_args, stack_size);

	/* This needs to be done regardless. If there were fmod_ret programs,
	 * the return value is only updated on the stack and still needs to be
	 * restored to R0.
	 */
	if (flags & BPF_TRAMP_F_CALL_ORIG) {
		im->ip_epilogue = prog;
		/* arg1: mov rdi, im */
		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
		if (emit_call(&prog, __bpf_tramp_exit, prog)) {
			ret = -EINVAL;
			goto cleanup;
		}
	}
	/* restore return value of orig_call or fentry prog back into RAX */
	if (save_ret)
		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);

	EMIT1(0x5B); /* pop rbx */
	EMIT1(0xC9); /* leave */
	if (flags & BPF_TRAMP_F_SKIP_FRAME)
		/* skip our return address and return to parent */
		EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
	EMIT1(0xC3); /* ret */
	/* Make sure the trampoline generation logic doesn't overflow */
	if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
		ret = -EFAULT;
		goto cleanup;
	}
	ret = prog - (u8 *)image;

cleanup:
	kfree(branches);
	return ret;
}

static int emit_fallback_jump(u8 **pprog)
{
	u8 *prog = *pprog;
	int err = 0;

#ifdef CONFIG_RETPOLINE
	/* Note that this assumes that the compiler uses external
	 * thunks for indirect calls. Both clang and GCC use the same
	 * naming convention for external thunks.
	 */
	err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
#else
	EMIT2(0xFF, 0xE2);	/* jmp rdx */
#endif
	*pprog = prog;
	return err;
}

static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
	u8 *jg_reloc, *prog = *pprog;
	int pivot, err, jg_bytes = 1;
	s64 jg_offset;

	if (a == b) {
		/* Leaf node of recursion, i.e. not a range of indices
		 * anymore.
		 */
		EMIT1(add_1mod(0x48, BPF_REG_3));	/* cmp rdx,func */
		if (!is_simm32(progs[a]))
			return -1;
		EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
			    progs[a]);
		err = emit_cond_near_jump(&prog,	/* je func */
					  (void *)progs[a], prog,
					  X86_JE);
		if (err)
			return err;

		err = emit_fallback_jump(&prog);	/* jmp thunk/indirect */
		if (err)
			return err;

		*pprog = prog;
		return 0;
	}

	/* Not a leaf node, so we pivot, and recursively descend into
	 * the lower and upper ranges.
	 */
	pivot = (b - a) / 2;
	EMIT1(add_1mod(0x48, BPF_REG_3));		/* cmp rdx,func */
	if (!is_simm32(progs[a + pivot]))
		return -1;
	EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);

	if (pivot > 2) {				/* jg upper_part */
		/* Require near jump. */
		jg_bytes = 4;
		EMIT2_off32(0x0F, X86_JG + 0x10, 0);
	} else {
		EMIT2(X86_JG, 0);
	}
	jg_reloc = prog;

	err = emit_bpf_dispatcher(&prog, a, a + pivot,	/* emit lower_part */
				  progs);
	if (err)
		return err;

	/* From Intel 64 and IA-32 Architectures Optimization
	 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
	 * Coding Rule 11: All branch targets should be 16-byte
	 * aligned.
	 */
	emit_align(&prog, 16);
	jg_offset = prog - jg_reloc;
	emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);

	err = emit_bpf_dispatcher(&prog, a + pivot + 1,	/* emit upper_part */
				  b, progs);
	if (err)
		return err;

	*pprog = prog;
	return 0;
}
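
/*
 * An illustrative sketch (addresses hypothetical) of what the recursion
 * above generates for four sorted targets f0 < f1 < f2 < f3; rdx holds
 * the address being dispatched:
 *
 *	cmp	rdx, f1		// pivot of [0, 3]
 *	jg	2f
 *	cmp	rdx, f0		// pivot of [0, 1]
 *	jg	1f
 *	cmp	rdx, f0		// leaf [0, 0]
 *	je	f0
 *	jmp	fallback
 * 1:	cmp	rdx, f1		// leaf [1, 1], 16-byte aligned
 *	je	f1
 *	jmp	fallback
 * 2:	...			// same shape for [2, 3], 16-byte aligned
 */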

static int cmp_ips(const void *a, const void *b)
{
	const s64 *ipa = a;
	const s64 *ipb = b;

	if (*ipa > *ipb)
		return 1;
	if (*ipa < *ipb)
		return -1;
	return 0;
}

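/*
 * Note: the sort below is what makes the binary search emitted by
 * emit_bpf_dispatcher() valid; cmp_ips() orders the target addresses
 * ascending.
 */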
int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
{
	u8 *prog = image;

	sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
	return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
}

struct x64_jit_data {
	struct bpf_binary_header *header;
	int *addrs;
	u8 *image;
	int proglen;
	struct jit_context ctx;
};

#define MAX_PASSES 20
#define PADDING_PASSES (MAX_PASSES - 5)
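
/* The JIT loop below may run up to MAX_PASSES shrinking passes; from
 * pass PADDING_PASSES onwards, branch instructions are nop-padded
 * (see do_jit()) so that the image size is guaranteed to converge.
 */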

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	bool padding = false;
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	addrs = jit_data->addrs;
	if (addrs) {
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		padding = true;
		goto skip_init_addrs;
	}
	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
	}

	/*
	 * Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	 */
	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
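	/* e.g. for prog->len == 2 this seeds addrs[] = { 64, 128, 192 };
	 * later passes only ever shrink these offsets.
	 */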
	ctx.cleanup_addr = proglen;
skip_init_addrs:

	/*
	 * The JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In such a case, do one more
	 * pass to emit the final image.
	 */
	for (pass = 0; pass < MAX_PASSES || image; pass++) {
		if (!padding && pass >= PADDING_PASSES)
			padding = true;
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
		if (proglen <= 0) {
out_image:
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			prog = orig_prog;
			goto out_addrs;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out_image;
			}
			break;
		}
		if (proglen == oldproglen) {
			/*
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			 */
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);

			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
						      &image, align, jit_fill_hole);
			if (!header) {
				prog = orig_prog;
				goto out_addrs;
			}
			prog->aux->extable = (void *) image + roundup(proglen, align);
		}
		oldproglen = proglen;
		cond_resched();
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		if (!prog->is_func || extra_pass) {
			bpf_tail_call_direct_fixup(prog);
			bpf_jit_binary_lock_ro(header);
		} else {
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		}
		prog->bpf_func = (void *)image;
		prog->jited = 1;
		prog->jited_len = proglen;
	} else {
		prog = orig_prog;
	}

	if (!image || !prog->is_func || extra_pass) {
		if (image)
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
		kvfree(addrs);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}
