/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__ 1

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "../ccm.h"
#include "../nfp_asm.h"
#include "fw.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

/* For relocation logic use the uppermost byte of the branch instruction as a
 * scratch area.  Remember to clear this before sending instructions to HW!
 */
#define OP_RELO_TYPE	0xff00000000000000ULL

enum nfp_relo_type {
	RELO_NONE = 0,
	/* standard internal jumps */
	RELO_BR_REL,
	/* internal jumps to parts of the outro */
	RELO_BR_GO_OUT,
	RELO_BR_GO_ABORT,
	RELO_BR_GO_CALL_PUSH_REGS,
	RELO_BR_GO_CALL_POP_REGS,
	/* external jumps to fixed addresses */
	RELO_BR_NEXT_PKT,
	RELO_BR_HELPER,
	/* immediate relocation against load address */
	RELO_IMMED_REL,
};
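
/* Illustrative sketch only (not part of the driver API): the JIT is expected
 * to stash the relocation type in the scratch byte reserved by OP_RELO_TYPE
 * and recover it later with the bitfield helpers included above.  The helper
 * names below are made up for this example.
 */
static inline u64 nfp_relo_mark_example(u64 insn, enum nfp_relo_type type)
{
	/* Encode @type into the top byte of the instruction word */
	return insn | FIELD_PREP(OP_RELO_TYPE, type);
}

static inline enum nfp_relo_type nfp_relo_type_example(u64 insn)
{
	/* Read the relocation type back out of the scratch byte */
	return FIELD_GET(OP_RELO_TYPE, insn);
}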

/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add a large offset
 * to them.
 */
#define BR_OFF_RELO		15000

enum static_regs {
	STATIC_REG_IMMA		= 20, /* Bank AB */
	STATIC_REG_IMM		= 21, /* Bank AB */
	STATIC_REG_STACK	= 22, /* Bank A */
	STATIC_REG_PKT_LEN	= 22, /* Bank B */
};

enum pkt_vec {
	PKT_VEC_PKT_LEN		= 0,
	PKT_VEC_PKT_PTR		= 2,
	PKT_VEC_QSEL_SET	= 4,
	PKT_VEC_QSEL_VAL	= 6,
};

#define PKT_VEL_QSEL_SET_BIT	4

#define pv_len(np)	reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np)	reg_lm(1, PKT_VEC_PKT_PTR)
#define pv_qsel_set(np)	reg_lm(1, PKT_VEC_QSEL_SET)
#define pv_qsel_val(np)	reg_lm(1, PKT_VEC_QSEL_VAL)

#define stack_reg(np)	reg_a(STATIC_REG_STACK)
#define stack_imm(np)	imm_b(np)
#define plen_reg(np)	reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np)	pv_ctm_ptr(np)
#define imm_a(np)	reg_a(STATIC_REG_IMM)
#define imm_b(np)	reg_b(STATIC_REG_IMM)
#define imma_a(np)	reg_a(STATIC_REG_IMMA)
#define imma_b(np)	reg_b(STATIC_REG_IMMA)
#define imm_both(np)	reg_both(STATIC_REG_IMM)
#define ret_reg(np)	imm_a(np)

#define NFP_BPF_ABI_FLAGS	reg_imm(0)
#define   NFP_BPF_ABI_FLAG_MARK	1

/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app:		backpointer to the app
 * @ccm:		common control message handler data
 *
 * @bpf_dev:		BPF offload device handle
 *
 * @cmsg_key_sz:	size of key in cmsg element array
 * @cmsg_val_sz:	size of value in cmsg element array
 *
 * @map_list:		list of offloaded maps
 * @maps_in_use:	number of currently offloaded maps
 * @map_elems_in_use:	number of elements allocated to offloaded maps
 *
 * @maps_neutral:	hash table of offload-neutral maps (on pointer)
 *
 * @abi_version:	global BPF ABI version
 * @cmsg_cache_cnt:	number of entries to read for caching
 *
 * @adjust_head:	adjust head capability
 * @adjust_head.flags:		extra flags for adjust head
 * @adjust_head.off_min:	minimal packet offset within buffer required
 * @adjust_head.off_max:	maximum packet offset within buffer required
 * @adjust_head.guaranteed_sub:	negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add:	positive adjustment guaranteed possible
 *
 * @maps:		map capability
 * @maps.types:			supported map types
 * @maps.max_maps:		max number of maps supported
 * @maps.max_elems:		max number of entries in each map
 * @maps.max_key_sz:		max size of map key
 * @maps.max_val_sz:		max size of map value
 * @maps.max_elem_sz:		max size of map entry (key + value)
 *
 * @helpers:		helper addresses for various calls
 * @helpers.map_lookup:		map lookup helper address
 * @helpers.map_update:		map update helper address
 * @helpers.map_delete:		map delete helper address
 * @helpers.perf_event_output:	output perf event to a ring buffer
 *
 * @pseudo_random:	FW initialized the pseudo-random machinery (CSRs)
 * @queue_select:	BPF can set the RX queue ID in packet vector
 * @adjust_tail:	BPF can simply truncate the packet size for adjust tail
 * @cmsg_multi_ent:	FW can pack multiple map entries in a single cmsg
 */
struct nfp_app_bpf {
	struct nfp_app *app;
	struct nfp_ccm ccm;

	struct bpf_offload_dev *bpf_dev;

	unsigned int cmsg_key_sz;
	unsigned int cmsg_val_sz;

	unsigned int cmsg_cache_cnt;

	struct list_head map_list;
	unsigned int maps_in_use;
	unsigned int map_elems_in_use;

	struct rhashtable maps_neutral;

	u32 abi_version;

	struct nfp_bpf_cap_adjust_head {
		u32 flags;
		int off_min;
		int off_max;
		int guaranteed_sub;
		int guaranteed_add;
	} adjust_head;

	struct {
		u32 types;
		u32 max_maps;
		u32 max_elems;
		u32 max_key_sz;
		u32 max_val_sz;
		u32 max_elem_sz;
	} maps;

	struct {
		u32 map_lookup;
		u32 map_update;
		u32 map_delete;
		u32 perf_event_output;
	} helpers;

	bool pseudo_random;
	bool queue_select;
	bool adjust_tail;
	bool cmsg_multi_ent;
};
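
/* Minimal illustration (assumption: @maps.types is a bitmap indexed by
 * enum bpf_map_type, matching its description above; the helper name is
 * hypothetical and not part of the driver).
 */
static inline bool nfp_bpf_map_type_supported_example(struct nfp_app_bpf *bpf,
						      enum bpf_map_type type)
{
	/* Each supported map type is assumed to set its bit in maps.types */
	return type < 32 && (bpf->maps.types & (1U << type));
}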

enum nfp_bpf_map_use {
	NFP_MAP_UNUSED = 0,
	NFP_MAP_USE_READ,
	NFP_MAP_USE_WRITE,
	NFP_MAP_USE_ATOMIC_CNT,
};

struct nfp_bpf_map_word {
	unsigned char type		:4;
	unsigned char non_zero_update	:1;
};

#define NFP_BPF_MAP_CACHE_CNT		4U
#define NFP_BPF_MAP_CACHE_TIME_NS	(250 * 1000)
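
/* Cache tuning (see struct nfp_bpf_map below): CACHE_CNT is the number of
 * entries a getnext request is expected to fetch for caching, and
 * CACHE_TIME_NS (250 us) roughly how long the cached response stays valid.
 */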

/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap:	pointer to the offloaded BPF map
 * @bpf:	back pointer to bpf app private structure
 * @tid:	table id identifying map on datapath
 *
 * @cache_lock:	protects @cache_blockers, @cache_to, @cache
 * @cache_blockers:	number of ops in flight which block caching
 * @cache_gen:	counter incremented by every blocker on exit
 * @cache_to:	time when cache will no longer be valid (ns)
 * @cache:	skb with cached response
 *
 * @l:		link on the nfp_app_bpf->map_list list
 * @use_map:	map of how the value is used (in 4B chunks)
 */
struct nfp_bpf_map {
	struct bpf_offloaded_map *offmap;
	struct nfp_app_bpf *bpf;
	u32 tid;

	spinlock_t cache_lock;
	u32 cache_blockers;
	u32 cache_gen;
	u64 cache_to;
	struct sk_buff *cache;

	struct list_head l;
	struct nfp_bpf_map_word use_map[];
};
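
/* Sketch only: @use_map holds one nfp_bpf_map_word per 4-byte chunk of the
 * map value, so an allocation for a map with a given value size could be
 * computed as below.  The helper name is hypothetical.
 */
static inline size_t nfp_bpf_map_priv_size_example(u32 value_size)
{
	/* One tracking word per 32-bit word of value */
	return sizeof(struct nfp_bpf_map) +
	       DIV_ROUND_UP(value_size, 4) * sizeof(struct nfp_bpf_map_word);
}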

struct nfp_bpf_neutral_map {
	struct rhash_head l;
	struct bpf_map *ptr;
	u32 map_id;
	u32 count;
};

extern const struct rhashtable_params nfp_bpf_maps_neutral_params;

struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

#define nfp_prog_first_meta(nfp_prog)					\
	list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_prog_last_meta(nfp_prog)					\
	list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_meta_next(meta)	list_next_entry(meta, l)
#define nfp_meta_prev(meta)	list_prev_entry(meta, l)

/**
 * struct nfp_bpf_reg_state - register state for calls
 * @reg: BPF register state from latest path
 * @var_off: for stack arg - changes stack offset on different paths
 */
struct nfp_bpf_reg_state {
	struct bpf_reg_state reg;
	bool var_off;
};

#define FLAG_INSN_IS_JUMP_DST			BIT(0)
#define FLAG_INSN_IS_SUBPROG_START		BIT(1)
#define FLAG_INSN_PTR_CALLER_STACK_FRAME	BIT(2)
/* Instruction is pointless, noop even on its own */
#define FLAG_INSN_SKIP_NOOP			BIT(3)
/* Instruction is optimized out based on preceding instructions */
#define FLAG_INSN_SKIP_PREC_DEPENDENT		BIT(4)
/* Instruction is optimized by the verifier */
#define FLAG_INSN_SKIP_VERIFIER_OPT		BIT(5)
/* Instruction needs to zero-extend its destination into the high 32 bits */
#define FLAG_INSN_DO_ZEXT			BIT(6)

#define FLAG_INSN_SKIP_MASK		(FLAG_INSN_SKIP_NOOP | \
					 FLAG_INSN_SKIP_PREC_DEPENDENT | \
					 FLAG_INSN_SKIP_VERIFIER_OPT)

/**
 * struct nfp_insn_meta - BPF instruction wrapper
 * @insn: BPF instruction
 * @ptr: pointer type for memory operations
 * @ldst_gather_len: memcpy length gathered from load/store sequence
 * @paired_st: the paired store insn at the head of the sequence
 * @ptr_not_const: pointer is not always constant
 * @pkt_cache: packet data cache information
 * @pkt_cache.range_start: start offset for associated packet data cache
 * @pkt_cache.range_end: end offset for associated packet data cache
 * @pkt_cache.do_init: this read needs to initialize packet data cache
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
 * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
 * @num_insns_after_br: number of insns following a branch jump, used for fixup
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
 * @umin_src: copy of core verifier umin_value for src operand
 * @umax_src: copy of core verifier umax_value for src operand
 * @umin_dst: copy of core verifier umin_value for dst operand
 * @umax_dst: copy of core verifier umax_value for dst operand
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
 * @flags: eBPF instruction extra optimization flags
 * @subprog_idx: index of subprogram to which the instruction belongs
 * @double_cb: callback for second part of the instruction
 * @l: link on nfp_prog->insns list
 */
struct nfp_insn_meta {
	struct bpf_insn insn;
	union {
		/* pointer ops (ld/st/xadd) */
		struct {
			struct bpf_reg_state ptr;
			struct bpf_insn *paired_st;
			s16 ldst_gather_len;
			bool ptr_not_const;
			struct {
				s16 range_start;
				s16 range_end;
				bool do_init;
			} pkt_cache;
			bool xadd_over_16bit;
			bool xadd_maybe_16bit;
		};
		/* jump */
		struct {
			struct nfp_insn_meta *jmp_dst;
			bool jump_neg_op;
			u32 num_insns_after_br; /* only for BPF-to-BPF calls */
		};
		/* function calls */
		struct {
			u32 func_id;
			struct bpf_reg_state arg1;
			struct nfp_bpf_reg_state arg2;
		};
		/* We are interested in range info for operands of ALU
		 * operations. For example, shift amount, multiplicand and
		 * multiplier etc.
		 */
		struct {
			u64 umin_src;
			u64 umax_src;
			u64 umin_dst;
			u64 umax_dst;
		};
	};
	unsigned int off;
	unsigned short n;
	unsigned short flags;
	unsigned short subprog_idx;
	instr_cb_t double_cb;

	struct list_head l;
};

#define BPF_SIZE_MASK	0x18
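
/* BPF_SIZE_MASK covers the size bits (BPF_B/H/W/DW) of the opcode, so the
 * load/store checks below compare class + mode while ignoring access size.
 */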

static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
{
	return BPF_CLASS(meta->insn.code);
}

static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
{
	return BPF_SRC(meta->insn.code);
}

static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
{
	return BPF_OP(meta->insn.code);
}

static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
{
	return BPF_MODE(meta->insn.code);
}

static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU;
}

static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
}

static inline bool is_mbpf_jmp32(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_JMP32;
}

static inline bool is_mbpf_jmp64(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_JMP;
}

static inline bool is_mbpf_jmp(const struct nfp_insn_meta *meta)
{
	return is_mbpf_jmp32(meta) || is_mbpf_jmp64(meta);
}

static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
}

static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_LD &&
	       (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
}

static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
}

static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_atomic(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_ATOMIC);
}

static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
{
	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL;
}

static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
{
	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}

static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta)
{
	u8 op;

	if (is_mbpf_jmp32(meta))
		return true;

	if (!is_mbpf_jmp64(meta))
		return false;

	op = mbpf_op(meta);
	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
}

static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta)
{
	struct bpf_insn insn = meta->insn;

	return insn.code == (BPF_JMP | BPF_CALL) &&
		insn.src_reg != BPF_PSEUDO_CALL;
}

static inline bool is_mbpf_pseudo_call(const struct nfp_insn_meta *meta)
{
	struct bpf_insn insn = meta->insn;

	return insn.code == (BPF_JMP | BPF_CALL) &&
		insn.src_reg == BPF_PSEUDO_CALL;
}

#define STACK_FRAME_ALIGN 64

/**
 * struct nfp_bpf_subprog_info - nfp BPF sub-program (a.k.a. function) info
 * @stack_depth:	maximum stack depth used by this sub-program
 * @needs_reg_push:	whether sub-program uses callee-saved registers
 */
struct nfp_bpf_subprog_info {
	u16 stack_depth;
	u8 needs_reg_push : 1;
};
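
/* Sketch (hypothetical helper): per-frame stack usage is assumed to be
 * rounded up to STACK_FRAME_ALIGN when frames are laid out, e.g.:
 */
static inline unsigned int
nfp_bpf_frame_bytes_example(const struct nfp_bpf_subprog_info *info)
{
	/* Round the sub-program's stack depth up to the frame alignment */
	return round_up((unsigned int)info->stack_depth, STACK_FRAME_ALIGN);
}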

/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @stack_size: total amount of stack used
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @tgt_call_push_regs: jump target for subroutine for saving R6~R9 to stack
 * @tgt_call_pop_regs: jump target for subroutine used for restoring R6~R9
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_frame_depth: max stack depth for current frame
 * @adjust_head_location: if program has single adjust head call - the insn no.
 * @map_records_cnt: the number of map pointers recorded for this prog
 * @subprog_cnt: number of sub-programs, including main function
 * @map_records: the map record pointers from bpf->maps_neutral
 * @subprog: pointer to an array of objects holding info about sub-programs
 * @n_insns: number of instructions on @insns list
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog {
	struct nfp_app_bpf *bpf;

	u64 *prog;
	unsigned int prog_len;
	unsigned int __prog_alloc_len;

	unsigned int stack_size;

	struct nfp_insn_meta *verifier_meta;

	enum bpf_prog_type type;

	unsigned int last_bpf_off;
	unsigned int tgt_out;
	unsigned int tgt_abort;
	unsigned int tgt_call_push_regs;
	unsigned int tgt_call_pop_regs;

	unsigned int n_translated;
	int error;

	unsigned int stack_frame_depth;
	unsigned int adjust_head_location;

	unsigned int map_records_cnt;
	unsigned int subprog_cnt;
	struct nfp_bpf_neutral_map **map_records;
	struct nfp_bpf_subprog_info *subprog;

	unsigned int n_insns;
	struct list_head insns;
};
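
/* Illustration only (hypothetical helper): instruction wrappers live on
 * @insns in program order, and translation passes are expected to honour the
 * FLAG_INSN_SKIP_* bits, roughly along these lines.
 */
static inline unsigned int
nfp_prog_count_emitted_example(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	unsigned int cnt = 0;

	/* Walk wrappers in order, ignoring instructions optimized away */
	list_for_each_entry(meta, &nfp_prog->insns, l)
		if (!(meta->flags & FLAG_INSN_SKIP_MASK))
			cnt++;

	return cnt;
}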

/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog:	currently loaded cls_bpf program
 * @start_off:	address of the first instruction in instruction memory
 * @tgt_done:	jump target to get the next packet
 */
struct nfp_bpf_vnic {
	struct bpf_prog *tc_prog;
	unsigned int start_off;
	unsigned int tgt_done;
};

bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
			       unsigned int mtu);

int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
		    int prev_insn_idx);
int nfp_bpf_finalize(struct bpf_verifier_env *env);

int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
			     struct bpf_insn *insn);
int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
		struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack);

struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx);

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf);
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf);
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key);

int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len);

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data,
			unsigned int len);
#endif