// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

/* "extern" is to avoid sparse warning. It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

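/* Byte offsets of the tcp_congestion_ops members that a bpf-tcp-cc is
 * not allowed to implement; bpf_tcp_ca_check_member() below rejects
 * them.  get_info() is the only such member at the moment.
 */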
static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

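/* Runs once when the struct_ops type is set up: cache the BTF type ids
 * of "sock" and "tcp_sock" so verifier-time pointer accesses can be
 * matched and promoted cheaply later on.
 */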
static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	return 0;
}

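/* Return true if @member_offset is the byte offset of one of the
 * unsupported tcp_congestion_ops members listed above.
 */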
static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

extern struct btf *btf_vmlinux;

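/* Validate a ctx access from a bpf-tcp-cc prog.  On top of the generic
 * tracing BTF checks, a PTR_TO_BTF_ID argument typed as "struct sock *"
 * is promoted to "struct tcp_sock *" so the prog can read tcp_sock
 * fields directly.
 */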
static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info))
		return false;

	if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

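/* Reads through a PTR_TO_BTF_ID are delegated to the generic
 * btf_struct_access().  Writes are only allowed into the explicitly
 * listed sock/inet_connection_sock/tcp_sock members below, and the
 * access must not run past the end of the named member.
 */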
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct btf *btf,
					const struct btf_type *t, int off,
					int size, enum bpf_access_type atype,
					u32 *next_btf_id,
					enum bpf_type_flag *flag)
{
	size_t end;

	if (atype == BPF_READ)
		return btf_struct_access(log, btf, t, off, size, atype,
					 next_btf_id, flag);

	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case offsetof(struct sock, sk_pacing_rate):
		end = offsetofend(struct sock, sk_pacing_rate);
		break;
	case offsetof(struct sock, sk_pacing_status):
		end = offsetofend(struct sock, sk_pacing_status);
		break;
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return 0;
}

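/* Kernel-side implementation of the bpf_tcp_send_ack() helper: send a
 * pure ACK carrying @rcv_nxt on behalf of @tp.
 */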
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &tcp_sock_id,
	.arg2_type	= ARG_ANYTHING,
};

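/* The prog's expected_attach_type holds the index of the struct_ops
 * member it implements; translate that index into the member's byte
 * offset within struct tcp_congestion_ops.
 */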
static u32 prog_ops_moff(const struct bpf_prog *prog)
{
	const struct btf_member *m;
	const struct btf_type *t;
	u32 midx;

	midx = prog->expected_attach_type;
	t = bpf_tcp_congestion_ops.type;
	m = &btf_type_member(t)[midx];

	return __btf_member_bit_offset(t, m) / 8;
}

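/* Helpers available to a bpf-tcp-cc prog.  set/getsockopt are not
 * offered to the release() member (see the in-function comments);
 * everything not handled here falls back to the base helper set.
 */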
static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		/* Does not allow release() to call setsockopt.
		 * release() is called when the current bpf-tcp-cc
		 * is retiring.  It is not allowed to call
		 * setsockopt() to make further changes which
		 * may potentially allocate new resources.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		/* Since get/setsockopt is usually expected to
		 * be available together, disable getsockopt for
		 * release also to avoid usage surprise.
		 * The bpf-tcp-cc already has a more powerful way
		 * to read tcp_sock from the PTR_TO_BTF_ID.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_getsockopt_proto;
		return NULL;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

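/* Kernel functions a bpf-tcp-cc prog may call directly as kfuncs: the
 * stock reno callbacks plus the tcp_slow_start()/tcp_cong_avoid_ai()
 * building blocks, so a bpf CA can reuse them instead of reimplementing
 * them.
 */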
BTF_SET8_START(bpf_tcp_ca_check_kfunc_ids)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_ID_FLAGS(func, tcp_reno_cong_avoid)
BTF_ID_FLAGS(func, tcp_reno_undo_cwnd)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_cong_avoid_ai)
BTF_SET8_END(bpf_tcp_ca_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_tcp_ca_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
};

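/* Copy/validate the non-func members that userspace may set.  Returning
 * 1 tells the struct_ops infrastructure the member has been handled
 * here; returning 0 leaves it to the default handling.
 */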
static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = __btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		if (tcp_ca_find(utcp_ca->name))
			return -EEXIST;
		return 1;
	}

	return 0;
}

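/* Reject any attempt to implement one of the unsupported members. */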
static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member)
{
	if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

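/* Registration hooks: the fully initialized kdata is handed to the
 * normal tcp congestion control registry, so a bpf-tcp-cc can be
 * selected like any built-in CA (e.g. via the TCP_CONGESTION sockopt).
 */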
static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

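/* Tie the pieces together.  bpf_struct_ops.c uses this descriptor (see
 * the extern declaration near the top) to implement the
 * "tcp_congestion_ops" struct_ops type.
 */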
struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.name = "tcp_congestion_ops",
};

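/* Register the kfunc id set at boot so the verifier accepts calls to
 * the listed kernel functions from BPF_PROG_TYPE_STRUCT_OPS progs.
 */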
static int __init bpf_tcp_ca_kfunc_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
}
late_initcall(bpf_tcp_ca_kfunc_init);