// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

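/* Errno values that the CT kfuncs report back through bpf_ct_opts->error;
 * defined locally rather than pulled in from libc headers.
 */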
#define EAFNOSUPPORT 97
#define EPROTO 71
#define ENONET 64
#define EINVAL 22
#define ENOENT 2

extern unsigned long CONFIG_HZ __kconfig;

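/* Results reported back to userspace: each test_* variable records the error
 * observed for one scenario (or 0 on success), while saddr/sport/daddr/dport
 * describe a tuple the loader is expected to fill in before the program runs,
 * used for the final "existing entry" lookup.
 */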
int test_einval_bpf_tuple = 0;
int test_einval_reserved = 0;
int test_einval_netns_id = 0;
int test_einval_len_opts = 0;
int test_eproto_l4proto = 0;
int test_enonet_netns_id = 0;
int test_enoent_lookup = 0;
int test_eafnosupport = 0;
int test_alloc_entry = -EINVAL;
int test_insert_entry = -EAFNOSUPPORT;
int test_succ_lookup = -ENOENT;
u32 test_delta_timeout = 0;
u32 test_status = 0;
u32 test_insert_lookup_mark = 0;
int test_snat_addr = -EINVAL;
int test_dnat_addr = -EINVAL;
__be32 saddr = 0;
__be16 sport = 0;
__be32 daddr = 0;
__be16 dport = 0;
int test_exist_lookup = -ENOENT;
u32 test_exist_lookup_mark = 0;

struct nf_conn;

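/* Local copy of the kernel's struct bpf_ct_opts; preserve_access_index lets
 * CO-RE relocate field accesses even if the kernel's layout differs.
 */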
struct bpf_ct_opts___local {
	s32 netns_id;
	s32 error;
	u8 l4proto;
	u8 reserved[3];
} __attribute__((preserve_access_index));

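/* Conntrack kfuncs, resolved against the running kernel at load time. */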
struct nf_conn *bpf_xdp_ct_alloc(struct xdp_md *, struct bpf_sock_tuple *, u32,
				 struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *, struct bpf_sock_tuple *, u32,
				  struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_skb_ct_alloc(struct __sk_buff *, struct bpf_sock_tuple *, u32,
				 struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
				  struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_ct_insert_entry(struct nf_conn *) __ksym;
void bpf_ct_release(struct nf_conn *) __ksym;
void bpf_ct_set_timeout(struct nf_conn *, u32) __ksym;
int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
int bpf_ct_change_status(struct nf_conn *, u32) __ksym;
int bpf_ct_set_nat_info(struct nf_conn *, union nf_inet_addr *,
			int port, enum nf_nat_manip_type) __ksym;

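/* Shared test body: the lookup/alloc kfuncs are passed in as function
 * pointers so the same checks run for both the XDP and the TC flavours.
 */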
static __always_inline void
nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
					struct bpf_ct_opts___local *, u32),
	   struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32,
				       struct bpf_ct_opts___local *, u32),
	   void *ctx)
{
	struct bpf_ct_opts___local opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
	struct bpf_sock_tuple bpf_tuple;
	struct nf_conn *ct;
	int err;

	__builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4));

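	/* NULL tuple: lookup must fail and report -EINVAL via opts->error. */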
	ct = lookup_fn(ctx, NULL, 0, &opts_def, sizeof(opts_def));
	if (ct)
		bpf_ct_release(ct);
	else
		test_einval_bpf_tuple = opts_def.error;

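	/* Non-zero reserved bytes in opts must be rejected with -EINVAL. */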
	opts_def.reserved[0] = 1;
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		       sizeof(opts_def));
	opts_def.reserved[0] = 0;
	opts_def.l4proto = IPPROTO_TCP;
	if (ct)
		bpf_ct_release(ct);
	else
		test_einval_reserved = opts_def.error;

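	/* netns_id below BPF_F_CURRENT_NETNS (-1) is invalid: expect -EINVAL. */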
	opts_def.netns_id = -2;
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		       sizeof(opts_def));
	opts_def.netns_id = -1;
	if (ct)
		bpf_ct_release(ct);
	else
		test_einval_netns_id = opts_def.error;

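	/* Truncated opts size must be rejected with -EINVAL. */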
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		       sizeof(opts_def) - 1);
	if (ct)
		bpf_ct_release(ct);
	else
		test_einval_len_opts = opts_def.error;

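	/* Only TCP and UDP l4protos are supported: ICMP must fail with -EPROTO. */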
	opts_def.l4proto = IPPROTO_ICMP;
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		       sizeof(opts_def));
	opts_def.l4proto = IPPROTO_TCP;
	if (ct)
		bpf_ct_release(ct);
	else
		test_eproto_l4proto = opts_def.error;

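	/* A netns id that does not exist must fail with -ENONET. */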
	opts_def.netns_id = 0xf00f;
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		       sizeof(opts_def));
	opts_def.netns_id = -1;
	if (ct)
		bpf_ct_release(ct);
	else
		test_enonet_netns_id = opts_def.error;

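	/* Well-formed lookup of the all-zero tuple: no entry exists, expect -ENOENT. */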
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		       sizeof(opts_def));
	if (ct)
		bpf_ct_release(ct);
	else
		test_enoent_lookup = opts_def.error;

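	/* A tuple length matching neither IPv4 nor IPv6 must fail with -EAFNOSUPPORT. */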
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4) - 1, &opts_def,
		       sizeof(opts_def));
	if (ct)
		bpf_ct_release(ct);
	else
		test_eafnosupport = opts_def.error;

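	/* Allocate an entry for a random tuple, set timeout, mark and NAT info,
	 * insert it, then verify the inserted entry can be looked up again and
	 * that the SNAT/DNAT addresses show up in the reply-direction tuple.
	 */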
	bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */
	bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */
	bpf_tuple.ipv4.sport = bpf_get_prandom_u32(); /* src port */
	bpf_tuple.ipv4.dport = bpf_get_prandom_u32(); /* dst port */

	ct = alloc_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		      sizeof(opts_def));
	if (ct) {
		__u16 sport = bpf_get_prandom_u32();
		__u16 dport = bpf_get_prandom_u32();
		union nf_inet_addr saddr = {};
		union nf_inet_addr daddr = {};
		struct nf_conn *ct_ins;

		bpf_ct_set_timeout(ct, 10000);
		ct->mark = 77;

		/* snat */
		saddr.ip = bpf_get_prandom_u32();
		bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC);
		/* dnat */
		daddr.ip = bpf_get_prandom_u32();
		bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST);

		ct_ins = bpf_ct_insert_entry(ct);
		if (ct_ins) {
			struct nf_conn *ct_lk;

			ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
					  &opts_def, sizeof(opts_def));
			if (ct_lk) {
				struct nf_conntrack_tuple *tuple;

				/* check snat and dnat addresses */
				tuple = &ct_lk->tuplehash[IP_CT_DIR_REPLY].tuple;
				if (tuple->dst.u3.ip == saddr.ip &&
				    tuple->dst.u.all == bpf_htons(sport))
					test_snat_addr = 0;
				if (tuple->src.u3.ip == daddr.ip &&
				    tuple->src.u.all == bpf_htons(dport))
					test_dnat_addr = 0;

				/* update ct entry timeout */
				bpf_ct_change_timeout(ct_lk, 10000);
				test_delta_timeout = ct_lk->timeout - bpf_jiffies64();
				test_delta_timeout /= CONFIG_HZ;
				test_insert_lookup_mark = ct_lk->mark;
				bpf_ct_change_status(ct_lk,
						     IPS_CONFIRMED | IPS_SEEN_REPLY);
				test_status = ct_lk->status;

				bpf_ct_release(ct_lk);
				test_succ_lookup = 0;
			}
			bpf_ct_release(ct_ins);
			test_insert_entry = 0;
		}
		test_alloc_entry = 0;
	}

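	/* Look up the already-established entry whose tuple userspace is
	 * expected to pass in via the saddr/daddr/sport/dport globals, and
	 * bump its mark so the loader can verify the write.
	 */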
	bpf_tuple.ipv4.saddr = saddr;
	bpf_tuple.ipv4.daddr = daddr;
	bpf_tuple.ipv4.sport = sport;
	bpf_tuple.ipv4.dport = dport;
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		       sizeof(opts_def));
	if (ct) {
		test_exist_lookup = 0;
		if (ct->mark == 42) {
			ct->mark++;
			test_exist_lookup_mark = ct->mark;
		}
		bpf_ct_release(ct);
	} else {
		test_exist_lookup = opts_def.error;
	}
}

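/* XDP entry point: exercise the xdp_md flavour of the CT kfuncs. */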
SEC("xdp")
int nf_xdp_ct_test(struct xdp_md *ctx)
{
	nf_ct_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx);
	return 0;
}

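/* TC entry point: exercise the __sk_buff flavour of the CT kfuncs. */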
SEC("tc")
int nf_skb_ct_test(struct __sk_buff *ctx)
{
	nf_ct_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx);
	return 0;
}

char _license[] SEC("license") = "GPL";