/* bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit32.h"

static inline void bpf_flush_icache(void *start, void *end)
{
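	/*
	 * Order the image stores before the cache flush below, which makes
	 * the new instructions visible to instruction fetch.
	 */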
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

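	/*
	 * Layout of the optional stack frame: non-volatile register N is
	 * saved REG_SZ*(32-N) bytes below the entry stack pointer, and the
	 * frame itself is created afterwards with a single store-with-update
	 * of r1.
	 */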
	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		/* Make stackframe */
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);

			/* Back up non-volatile regs. */
			PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
			}
		}
		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D  = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	/* make sure we don't leak kernel information to userspace */
	if (bpf_needs_clear_a(&filter[0]))
		PPC_LI(r_A, 0);
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
			PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}

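/*
 * Pick the skb load helper at compile time from the constant offset K:
 * non-negative offsets use the _positive_offset variant, offsets in the
 * ancillary range (>= SKF_LL_OFF) use the _negative_offset variant, and
 * any other (out-of-range) constant falls back to the generic helper.
 */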
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;
		u16 code = bpf_anc_helper(&filter[i]);

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (code) {
			/*** ALU ops ***/
		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
			if (!K)
				break;
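			/*
			 * addi takes a signed 16-bit immediate, so add the
			 * low half first and, when K doesn't fit in 16 bits,
			 * the sign-adjusted high half (IMM_HA) with addis.
			 */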
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MULW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MULW(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
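				/*
				 * X is non-zero: branch 12 bytes ahead, over
				 * the li+jmp "return 0" pair below, straight
				 * to the divide.
				 */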
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
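			/* For MOD, the remainder is A - (A / X) * X. */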
			if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
				PPC_DIVWU(r_scratch1, r_A, r_X);
				PPC_MULW(r_scratch1, r_X, r_scratch1);
				PPC_SUB(r_A, r_A, r_scratch1);
			} else {
				PPC_DIVWU(r_A, r_A, r_X);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MULW(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
			if (K == 1)
				break;
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_RET | BPF_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we have stuff to clean up. Otherwise,
			 * if there's nothing to tidy, just return. If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2. Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_RET | BPF_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_MISC | BPF_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_MISC | BPF_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

			/*** Constant loads/M[] access ***/
		case BPF_LD | BPF_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_LDX | BPF_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_LD | BPF_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LDX | BPF_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
			PPC_LWZ_OFFS(r_A, r_skb, K);
			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

			/*** Ancillary info loads ***/
		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  type) != 2);
			PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				PPC_LWZ_OFFS(r_A, r_scratch1,
					     offsetof(struct net_device, ifindex));
			} else {
				PPC_LHZ_OFFS(r_A, r_scratch1,
					     offsetof(struct net_device, type));
			}

			break;
		case BPF_ANC | SKF_AD_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  hash));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

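			/*
			 * vlan_tci carries the VLAN_TAG_PRESENT flag in bit
			 * 0x1000: mask it off for SKF_AD_VLAN_TAG, or isolate
			 * it and shift it down to 0/1 for
			 * SKF_AD_VLAN_TAG_PRESENT.
			 */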
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
			} else {
				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
				PPC_SRWI(r_A, r_A, 12);
			}
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
			PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
			PPC_SRWI(r_A, r_A, 5);
			break;
		case BPF_ANC | SKF_AD_CPU:
			PPC_BPF_LOAD_CPU(r_A);
			break;
			/*** Absolute loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_LD | BPF_H | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_B | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

			/*** Indirect loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K]. Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_LDX | BPF_B | BPF_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			break;

			/*** Jump and branches ***/
		case BPF_JMP | BPF_JA:
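			/*
			 * Unconditional skip of K instructions; K == 0 is
			 * just a fall-through.
			 */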
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}

	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

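	/* flen + 1 slots: addrs[flen] will hold the offset of the epilogue. */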
	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes.  Since
	 * BPF_MAXINSNS = 4096, a worst-case jump from start to finish is only
	 * in range if each BPF instruction compiles to at most 8 bytes (two
	 * PPC instructions), which isn't feasible, so long jumps are used,
	 * distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in). Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used. Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter. On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code. On subsequent passes, branches will be
	 * generated short or long and code size will reduce. With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real. Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long. The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build the prologue, given the features we've seen.  This
	 * will update cgctx.idx as it pretends to output instructions, then
	 * we can calculate the total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(alloclen);
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);
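	/*
	 * When a function descriptor is used (FUNCTION_DESCR_SIZE != 0), the
	 * actual instructions start that many bytes into the image; image is
	 * a u32 *, hence the division by 4.
	 */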
	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/* Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

	bpf_flush_icache(code_base, code_base + (proglen/4));

#ifdef CONFIG_PPC64
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;

out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}