/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NETFILTER_H
#define __LINUX_NETFILTER_H

#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/module.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <linux/sockptr.h>
#include <net/net_namespace.h>

static inline int NF_DROP_GETERR(int verdict)
{
	return -(verdict >> NF_VERDICT_QBITS);
}
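/*
 * Illustrative sketch: a hook can embed an errno in the upper verdict
 * bits with NF_DROP_ERR() (see <uapi/linux/netfilter.h>), which
 * NF_DROP_GETERR() then recovers, e.g.:
 *
 *	int verdict = NF_DROP_ERR(-ECONNREFUSED); // (ECONNREFUSED << 16) | NF_DROP
 *	int err = NF_DROP_GETERR(verdict);        // == -ECONNREFUSED
 */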

static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
				   const union nf_inet_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	const unsigned long *ul1 = (const unsigned long *)a1;
	const unsigned long *ul2 = (const unsigned long *)a2;

	return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
#else
	return a1->all[0] == a2->all[0] &&
	       a1->all[1] == a2->all[1] &&
	       a1->all[2] == a2->all[2] &&
	       a1->all[3] == a2->all[3];
#endif
}

static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
				     union nf_inet_addr *result,
				     const union nf_inet_addr *mask)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	const unsigned long *ua = (const unsigned long *)a1;
	unsigned long *ur = (unsigned long *)result;
	const unsigned long *um = (const unsigned long *)mask;

	ur[0] = ua[0] & um[0];
	ur[1] = ua[1] & um[1];
#else
	result->all[0] = a1->all[0] & mask->all[0];
	result->all[1] = a1->all[1] & mask->all[1];
	result->all[2] = a1->all[2] & mask->all[2];
	result->all[3] = a1->all[3] & mask->all[3];
#endif
}
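/*
 * Usage sketch (hypothetical helper, not part of this header): test
 * whether @addr lies within the network @net/@mask by masking first,
 * then comparing:
 *
 *	static bool my_addr_in_net(const union nf_inet_addr *addr,
 *				   const union nf_inet_addr *net,
 *				   const union nf_inet_addr *mask)
 *	{
 *		union nf_inet_addr masked;
 *
 *		nf_inet_addr_mask(addr, &masked, mask);
 *		return nf_inet_addr_cmp(&masked, net);
 *	}
 */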

int netfilter_init(void);

struct sk_buff;

struct nf_hook_ops;

struct sock;

struct nf_hook_state {
	u8 hook;
	u8 pf;
	struct net_device *in;
	struct net_device *out;
	struct sock *sk;
	struct net *net;
	int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};

typedef unsigned int nf_hookfn(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state);
enum nf_hook_ops_type {
	NF_HOOK_OP_UNDEFINED,
	NF_HOOK_OP_NF_TABLES,
	NF_HOOK_OP_BPF,
};

struct nf_hook_ops {
	/* User fills in from here down. */
	nf_hookfn *hook;
	struct net_device *dev;
	void *priv;
	u8 pf;
	enum nf_hook_ops_type hook_ops_type:8;
	unsigned int hooknum;
	/* Hooks are ordered in ascending priority. */
	int priority;
};
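/*
 * Typical static setup (sketch; my_hookfn and my_ops are hypothetical
 * names):
 *
 *	static unsigned int my_hookfn(void *priv, struct sk_buff *skb,
 *				      const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;
 *	}
 *
 *	static const struct nf_hook_ops my_ops = {
 *		.hook		= my_hookfn,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_LOCAL_IN,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 */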

struct nf_hook_entry {
	nf_hookfn *hook;
	void *priv;
};

struct nf_hook_entries_rcu_head {
	struct rcu_head head;
	void *allocation;
};

struct nf_hook_entries {
	u16 num_hook_entries;
	/* padding */
	struct nf_hook_entry hooks[];

	/* trailer: pointers to the original nf_hook_ops of each hook,
	 * followed by an rcu_head and scratch space used for freeing
	 * the structure via call_rcu.
	 *
	 * This is not part of struct nf_hook_entry since it's only
	 * needed in the slow path (hook register/unregister):
	 * const struct nf_hook_ops *orig_ops[]
	 *
	 * For the same reason, we store this at the end -- it's
	 * only needed when a hook is deleted, not during
	 * packet path processing:
	 * struct nf_hook_entries_rcu_head head
	 */
};
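/*
 * The resulting allocation layout is therefore (sketch, mirroring what
 * the netfilter core allocates):
 *
 *	size = sizeof(struct nf_hook_entries) +
 *	       num * sizeof(struct nf_hook_entry) +	// ->hooks[]
 *	       num * sizeof(struct nf_hook_ops *) +	// trailer: orig_ops[]
 *	       sizeof(struct nf_hook_entries_rcu_head);	// trailer: rcu head
 */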

#ifdef CONFIG_NETFILTER
static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e)
{
	unsigned int n = e->num_hook_entries;
	const void *hook_end;

	hook_end = &e->hooks[n]; /* this is *past* ->hooks[]! */

	return (struct nf_hook_ops **)hook_end;
}

static inline int
nf_hook_entry_hookfn(const struct nf_hook_entry *entry, struct sk_buff *skb,
		     struct nf_hook_state *state)
{
	return entry->hook(entry->priv, skb, state);
}

static inline void nf_hook_state_init(struct nf_hook_state *p,
				      unsigned int hook,
				      u_int8_t pf,
				      struct net_device *indev,
				      struct net_device *outdev,
				      struct sock *sk,
				      struct net *net,
				      int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	p->hook = hook;
	p->pf = pf;
	p->in = indev;
	p->out = outdev;
	p->sk = sk;
	p->net = net;
	p->okfn = okfn;
}

struct nf_sockopt_ops {
	struct list_head list;

	u_int8_t pf;

	/* Non-inclusive ranges: use 0/0/NULL to never get called. */
	int set_optmin;
	int set_optmax;
	int (*set)(struct sock *sk, int optval, sockptr_t arg,
		   unsigned int len);
	int get_optmin;
	int get_optmax;
	int (*get)(struct sock *sk, int optval, void __user *user, int *len);
	/* Use the module struct to lock set/get code in place */
	struct module *owner;
};

/* Functions to register/unregister hook points. */
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int n);
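/*
 * Registration sketch from module init/exit, continuing the hypothetical
 * my_ops example above:
 *
 *	static int __init my_init(void)
 *	{
 *		return nf_register_net_hook(&init_net, &my_ops);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		nf_unregister_net_hook(&init_net, &my_ops);
 *	}
 */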

/* Functions to register get/setsockopt ranges (non-inclusive). You
   need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
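/*
 * Sketch of a sockopt registration (MY_SOCKOPT_BASE and my_get are
 * hypothetical; note optmax is exclusive, per "non-inclusive" above):
 *
 *	static struct nf_sockopt_ops my_sockopts = {
 *		.pf		= PF_INET,
 *		.get_optmin	= MY_SOCKOPT_BASE,
 *		.get_optmax	= MY_SOCKOPT_BASE + 1,
 *		.get		= my_get,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	err = nf_register_sockopt(&my_sockopts);
 */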

#ifdef CONFIG_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif

int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 const struct nf_hook_entries *e, unsigned int i);

void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
		       const struct nf_hook_entries *e);
/**
 * nf_hook - call a netfilter hook
 *
 * Returns 1 if the hook has allowed the packet to pass. The function
 * okfn must be invoked by the caller in this case. Any other return
 * value indicates the packet has been consumed by the hook.
 */
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
			  struct sock *sk, struct sk_buff *skb,
			  struct net_device *indev, struct net_device *outdev,
			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	struct nf_hook_entries *hook_head = NULL;
	int ret = 1;

#ifdef CONFIG_JUMP_LABEL
	if (__builtin_constant_p(pf) &&
	    __builtin_constant_p(hook) &&
	    !static_key_false(&nf_hooks_needed[pf][hook]))
		return 1;
#endif

	rcu_read_lock();
	switch (pf) {
	case NFPROTO_IPV4:
		hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
		break;
	case NFPROTO_IPV6:
		hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
		break;
	case NFPROTO_ARP:
#ifdef CONFIG_NETFILTER_FAMILY_ARP
		if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
			break;
		hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
#endif
		break;
	case NFPROTO_BRIDGE:
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
		hook_head = rcu_dereference(net->nf.hooks_bridge[hook]);
#endif
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (hook_head) {
		struct nf_hook_state state;

		nf_hook_state_init(&state, hook, pf, indev, outdev,
				   sk, net, okfn);

		ret = nf_hook_slow(skb, &state, hook_head, 0);
	}
	rcu_read_unlock();

	return ret;
}

/* Activate hook; either okfn or kfree_skb is called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if packet dropped.  Zero means queued, stolen or
   accepted.
*/

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   AK:
   Just document it clearly, then you can expect some sense from kernel
   coders :)
*/

static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	     bool cond)
{
	int ret;

	if (!cond ||
	    ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1))
		ret = okfn(net, sk, skb);
	return ret;
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
	struct net_device *in, struct net_device *out,
	int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
	if (ret == 1)
		ret = okfn(net, sk, skb);
	return ret;
}
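/*
 * Typical call site (sketch of the pattern used on the IPv4 input path):
 *
 *	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL, skb,
 *		       dev, NULL, ip_rcv_finish);
 *
 * okfn (here ip_rcv_finish) runs only when every hook accepted the
 * packet; a drop propagates as -ERRNO, queue/stolen as 0.
 */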

static inline void
NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct list_head *head, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	struct nf_hook_entries *hook_head = NULL;

#ifdef CONFIG_JUMP_LABEL
	if (__builtin_constant_p(pf) &&
	    __builtin_constant_p(hook) &&
	    !static_key_false(&nf_hooks_needed[pf][hook]))
		return;
#endif

	rcu_read_lock();
	switch (pf) {
	case NFPROTO_IPV4:
		hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
		break;
	case NFPROTO_IPV6:
		hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (hook_head) {
		struct nf_hook_state state;

		nf_hook_state_init(&state, hook, pf, in, out, sk, net, okfn);

		nf_hook_slow_list(head, &state, hook_head);
	}
	rcu_read_unlock();
}
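/*
 * List variant call site (sketch of the IPv4 list-receive pattern):
 *
 *	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
 *		     head, dev, NULL, ip_rcv_finish);
 *
 * Unlike NF_HOOK(), okfn is not invoked here; skbs that survive the
 * hooks remain on @head and the caller processes them afterwards.
 */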

/* Call set/getsockopt() */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, sockptr_t opt,
		  unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
		  int *len);

struct flowi;
struct nf_queue_entry;

__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook,
		    unsigned int dataoff, u_int8_t protocol,
		    unsigned short family);

__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
			    unsigned int dataoff, unsigned int len,
			    u_int8_t protocol, unsigned short family);
int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
	     bool strict, unsigned short family);
int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry);

#include <net/flow.h>

struct nf_conn;
enum nf_nat_manip_type;
struct nlattr;
enum ip_conntrack_dir;

struct nf_nat_hook {
	int (*parse_nat_setup)(struct nf_conn *ct, enum nf_nat_manip_type manip,
			       const struct nlattr *attr);
	void (*decode_session)(struct sk_buff *skb, struct flowi *fl);
	unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct,
				  enum nf_nat_manip_type mtype,
				  enum ip_conntrack_dir dir);
	void (*remove_nat_bysrc)(struct nf_conn *ct);
};

extern const struct nf_nat_hook __rcu *nf_nat_hook;

static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	const struct nf_nat_hook *nat_hook;

	rcu_read_lock();
	nat_hook = rcu_dereference(nf_nat_hook);
	if (nat_hook && nat_hook->decode_session)
		nat_hook->decode_session(skb, fl);
	rcu_read_unlock();
#endif
}

#else /* !CONFIG_NETFILTER */
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	     bool cond)
{
	return okfn(net, sk, skb);
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	struct sk_buff *skb, struct net_device *in, struct net_device *out,
	int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return okfn(net, sk, skb);
}

static inline void
NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct list_head *head, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	/* nothing to do */
}

static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
			  struct sock *sk, struct sk_buff *skb,
			  struct net_device *indev, struct net_device *outdev,
			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return 1;
}
struct flowi;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}
#endif /*CONFIG_NETFILTER*/

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_zones_common.h>

void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
void nf_ct_set_closing(struct nf_conntrack *nfct);
struct nf_conntrack_tuple;
bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
			 const struct sk_buff *skb);
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
static inline void nf_ct_set_closing(struct nf_conntrack *nfct) {}
struct nf_conntrack_tuple;
static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
				       const struct sk_buff *skb)
{
	return false;
}
#endif

struct nf_conn;
enum ip_conntrack_info;

struct nf_ct_hook {
	int (*update)(struct net *net, struct sk_buff *skb);
	void (*destroy)(struct nf_conntrack *);
	bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
			      const struct sk_buff *);
	void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
	void (*set_closing)(struct nf_conntrack *nfct);
};
extern const struct nf_ct_hook __rcu *nf_ct_hook;

struct nlattr;

struct nfnl_ct_hook {
	size_t (*build_size)(const struct nf_conn *ct);
	int (*build)(struct sk_buff *skb, struct nf_conn *ct,
		     enum ip_conntrack_info ctinfo,
		     u_int16_t ct_attr, u_int16_t ct_info_attr);
	int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
	int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
			     u32 portid, u32 report);
	void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo, s32 off);
};
extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook;

struct nf_defrag_hook {
	struct module *owner;
	int (*enable)(struct net *net);
	void (*disable)(struct net *net);
};

extern const struct nf_defrag_hook __rcu *nf_defrag_v4_hook;
extern const struct nf_defrag_hook __rcu *nf_defrag_v6_hook;

/*
 * nf_skb_duplicated - TEE target has sent a packet
 *
 * When an xtables target sends a packet, the OUTPUT and POSTROUTING
 * hooks are traversed again, i.e. nft and xtables are invoked recursively.
 *
 * This is used by the xtables TEE target to prevent the duplicated skb
 * from being duplicated again.
 */
DECLARE_PER_CPU(bool, nf_skb_duplicated);
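/*
 * Guard pattern (sketch, following the nf_dup_* helpers): skip work when
 * already handling a duplicate, and flag the nested traversal:
 *
 *	if (this_cpu_read(nf_skb_duplicated))
 *		return;
 *	skb = pskb_copy(skb, GFP_ATOMIC);
 *	...
 *	__this_cpu_write(nf_skb_duplicated, true);
 *	ip_local_out(net, skb->sk, skb);
 *	__this_cpu_write(nf_skb_duplicated, false);
 */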

/*
 * Contains bitmask of ctnetlink event subscribers, if any.
 * Can't be pernet due to NETLINK_LISTEN_ALL_NSID setsockopt flag.
 */
extern u8 nf_ctnetlink_has_listener;
#endif /*__LINUX_NETFILTER_H*/