/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#ifndef _LINUX_SKMSG_H
#define _LINUX_SKMSG_H

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/strparser.h>

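/* Scatterlist ring indices take NR_MSG_FRAG_IDS distinct values so that
 * start == end can denote an empty ring while a full ring still carries
 * MAX_MSG_FRAGS entries (see sk_msg_iter_dist() and sk_msg_full() below).
 */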
#define MAX_MSG_FRAGS			MAX_SKB_FRAGS
#define NR_MSG_FRAG_IDS			(MAX_MSG_FRAGS + 1)

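/* Internal verdict values used while running the attached BPF programs;
 * __SK_NONE means no verdict has been computed yet.
 */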
enum __sk_action {
	__SK_DROP = 0,
	__SK_PASS,
	__SK_REDIRECT,
	__SK_NONE,
};

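/* Scatterlist ring backing an sk_msg: start/curr/end index into data[],
 * size is the total byte count, and the copy bitmap marks entries that are
 * not exposed to a BPF program directly (see
 * sk_msg_compute_data_pointers()).
 */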
struct sk_msg_sg {
	u32				start;
	u32				curr;
	u32				end;
	u32				size;
	u32				copybreak;
	DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2);
	/* The extra two elements:
	 * 1) used for chaining the front and back sections when the list
	 *    becomes partitioned (e.g. end < start). The crypto APIs require
	 *    the chaining;
	 * 2) to chain trailer SG entries after the message.
	 */
	struct scatterlist		data[MAX_MSG_FRAGS + 2];
};

/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
	struct sk_msg_sg		sg;
	void				*data;
	void				*data_end;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				flags;
	struct sk_buff			*skb;
	struct sock			*sk_redir;
	struct sock			*sk;
	struct list_head		list;
};

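/* BPF programs that may be attached to a psock via sockmap/sockhash. */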
struct sk_psock_progs {
	struct bpf_prog			*msg_parser;
	struct bpf_prog			*stream_parser;
	struct bpf_prog			*stream_verdict;
	struct bpf_prog			*skb_verdict;
};

enum sk_psock_state_bits {
	SK_PSOCK_TX_ENABLED,
};

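/* Link from a psock back to the sockmap/sockhash element that holds a
 * reference to it, so the element can be found and removed on teardown.
 */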
struct sk_psock_link {
	struct list_head		list;
	struct bpf_map			*map;
	void				*link_raw;
};

struct sk_psock_work_state {
	struct sk_buff			*skb;
	u32				len;
	u32				off;
};

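/* Per-socket sockmap/sockhash state, reached from the socket through
 * sk_user_data (see sk_psock() below) and refcounted via refcnt.
 */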
struct sk_psock {
	struct sock			*sk;
	struct sock			*sk_redir;
	u32				apply_bytes;
	u32				cork_bytes;
	u32				eval;
	struct sk_msg			*cork;
	struct sk_psock_progs		progs;
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
	struct strparser		strp;
#endif
	struct sk_buff_head		ingress_skb;
	struct list_head		ingress_msg;
	spinlock_t			ingress_lock;
	unsigned long			state;
	struct list_head		link;
	spinlock_t			link_lock;
	refcount_t			refcnt;
	void (*saved_unhash)(struct sock *sk);
	void (*saved_destroy)(struct sock *sk);
	void (*saved_close)(struct sock *sk, long timeout);
	void (*saved_write_space)(struct sock *sk);
	void (*saved_data_ready)(struct sock *sk);
	int  (*psock_update_sk_prot)(struct sock *sk, struct sk_psock *psock,
				     bool restore);
	struct proto			*sk_proto;
	struct mutex			work_mutex;
	struct sk_psock_work_state	work_state;
	struct work_struct		work;
	struct rcu_work			rwork;
};

int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce);
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len);
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
int sk_msg_free(struct sock *sk, struct sk_msg *msg);
int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);

int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags);
bool sk_msg_is_readable(struct sock *sk);

static inline void sk_msg_check_to_free(struct sk_msg *msg, u32 i, u32 bytes)
{
	WARN_ON(i == msg->sg.end && bytes);
}

static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
{
	if (psock->apply_bytes) {
		if (psock->apply_bytes < bytes)
			psock->apply_bytes = 0;
		else
			psock->apply_bytes -= bytes;
	}
}

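/* Number of ring slots from start up to, but not including, end,
 * accounting for wrap-around at NR_MSG_FRAG_IDS.
 */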
static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
	return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}

#define sk_msg_iter_var_prev(var)			\
	do {						\
		if (var == 0)				\
			var = NR_MSG_FRAG_IDS - 1;	\
		else					\
			var--;				\
	} while (0)

#define sk_msg_iter_var_next(var)			\
	do {						\
		var++;					\
		if (var == NR_MSG_FRAG_IDS)		\
			var = 0;			\
	} while (0)

#define sk_msg_iter_prev(msg, which)			\
	sk_msg_iter_var_prev(msg->sg.which)

#define sk_msg_iter_next(msg, which)			\
	sk_msg_iter_var_next(msg->sg.which)
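
/* Illustrative use of the iterator helpers: walk every used entry of a
 * message, assuming the ring holds at least one entry.
 *
 *	u32 i = msg->sg.start;
 *
 *	do {
 *		struct scatterlist *sge = sk_msg_elem(msg, i);
 *
 *		... use sge->length, sg_page(sge), ...
 *		sk_msg_iter_var_next(i);
 *	} while (i != msg->sg.end);
 */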

static inline void sk_msg_init(struct sk_msg *msg)
{
	BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
	memset(msg, 0, sizeof(*msg));
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}

static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
			       int which, u32 size)
{
	dst->sg.data[which] = src->sg.data[which];
	dst->sg.data[which].length  = size;
	dst->sg.size		   += size;
	src->sg.size		   -= size;
	src->sg.data[which].length -= size;
	src->sg.data[which].offset += size;
}

static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
{
	memcpy(dst, src, sizeof(*src));
	sk_msg_init(src);
}

static inline bool sk_msg_full(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}

static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
	return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}

static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
{
	return &msg->sg.data[which];
}

static inline struct scatterlist sk_msg_elem_cpy(struct sk_msg *msg, int which)
{
	return msg->sg.data[which];
}

static inline struct page *sk_msg_page(struct sk_msg *msg, int which)
{
	return sg_page(sk_msg_elem(msg, which));
}

static inline bool sk_msg_to_ingress(const struct sk_msg *msg)
{
	return msg->flags & BPF_F_INGRESS;
}

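/* Set up msg->data/data_end to cover the first sg entry for direct access
 * by a BPF msg program; entries marked in the copy bitmap are not exposed,
 * so both pointers are left NULL in that case.
 */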
static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
	struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);

	if (test_bit(msg->sg.start, msg->sg.copy)) {
		msg->data = NULL;
		msg->data_end = NULL;
	} else {
		msg->data = sg_virt(sge);
		msg->data_end = msg->data + sge->length;
	}
}

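/* Append a page fragment at the tail of the ring, taking a page reference
 * and marking the new entry in the copy bitmap.
 */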
static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
				   u32 len, u32 offset)
{
	struct scatterlist *sge;

	get_page(page);
	sge = sk_msg_elem(msg, msg->sg.end);
	sg_set_page(sge, page, len, offset);
	sg_unmark_end(sge);

	__set_bit(msg->sg.end, msg->sg.copy);
	msg->sg.size += len;
	sk_msg_iter_next(msg, end);
}

static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
	do {
		if (copy_state)
			__set_bit(i, msg->sg.copy);
		else
			__clear_bit(i, msg->sg.copy);
		sk_msg_iter_var_next(i);
		if (i == msg->sg.end)
			break;
	} while (1);
}

static inline void sk_msg_sg_copy_set(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, true);
}

static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
{
	sk_msg_sg_copy(msg, start, false);
}

static inline struct sk_psock *sk_psock(const struct sock *sk)
{
	return __rcu_dereference_sk_user_data_with_flags(sk,
							 SK_USER_DATA_PSOCK);
}

static inline void sk_psock_set_state(struct sk_psock *psock,
				      enum sk_psock_state_bits bit)
{
	set_bit(bit, &psock->state);
}

static inline void sk_psock_clear_state(struct sk_psock *psock,
					enum sk_psock_state_bits bit)
{
	clear_bit(bit, &psock->state);
}

static inline bool sk_psock_test_state(const struct sk_psock *psock,
				       enum sk_psock_state_bits bit)
{
	return test_bit(bit, &psock->state);
}

static inline void sock_drop(struct sock *sk, struct sk_buff *skb)
{
	sk_drops_add(sk, skb);
	kfree_skb(skb);
}

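/* Queue a message on the psock ingress list for local receive; if
 * SK_PSOCK_TX_ENABLED has already been cleared the message is freed
 * instead. Serialized by ingress_lock.
 */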
static inline void sk_psock_queue_msg(struct sk_psock *psock,
				      struct sk_msg *msg)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
		list_add_tail(&msg->list, &psock->ingress_msg);
	else {
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_peek_msg(struct sk_psock *psock)
{
	struct sk_msg *msg;

	spin_lock_bh(&psock->ingress_lock);
	msg = list_first_entry_or_null(&psock->ingress_msg, struct sk_msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return msg;
}

static inline struct sk_msg *sk_psock_next_msg(struct sk_psock *psock,
					       struct sk_msg *msg)
{
	struct sk_msg *ret;

	spin_lock_bh(&psock->ingress_lock);
	if (list_is_last(&msg->list, &psock->ingress_msg))
		ret = NULL;
	else
		ret = list_next_entry(msg, list);
	spin_unlock_bh(&psock->ingress_lock);
	return ret;
}

static inline bool sk_psock_queue_empty(const struct sk_psock *psock)
{
	return psock ? list_empty(&psock->ingress_msg) : true;
}

static inline void kfree_sk_msg(struct sk_msg *msg)
{
	if (msg->skb)
		consume_skb(msg->skb);
	kfree(msg);
}

static inline void sk_psock_report_error(struct sk_psock *psock, int err)
{
	struct sock *sk = psock->sk;

	sk->sk_err = err;
	sk_error_report(sk);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node);
void sk_psock_stop(struct sk_psock *psock);

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
#else
static inline int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	return -EOPNOTSUPP;
}

static inline void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
}

static inline void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
}
#endif

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg);

static inline struct sk_psock_link *sk_psock_init_link(void)
{
	return kzalloc(sizeof(struct sk_psock_link),
		       GFP_ATOMIC | __GFP_NOWARN);
}

static inline void sk_psock_free_link(struct sk_psock_link *link)
{
	kfree(link);
}

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock);

static inline void sk_psock_cork_free(struct sk_psock *psock)
{
	if (psock->cork) {
		sk_msg_free(psock->sk, psock->cork);
		kfree(psock->cork);
		psock->cork = NULL;
	}
}

static inline void sk_psock_restore_proto(struct sock *sk,
					  struct sk_psock *psock)
{
	if (psock->psock_update_sk_prot)
		psock->psock_update_sk_prot(sk, psock, true);
}

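/* Take a reference on the socket's psock under RCU; returns NULL if no
 * psock is attached or its refcount has already dropped to zero.
 */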
static inline struct sk_psock *sk_psock_get(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock && !refcount_inc_not_zero(&psock->refcnt))
		psock = NULL;
	rcu_read_unlock();
	return psock;
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock);

static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
{
	if (refcount_dec_and_test(&psock->refcnt))
		sk_psock_drop(sk, psock);
}

static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
}

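/* Atomically install @prog (which may be NULL) and release the reference
 * held on whatever program it replaces.
 */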
static inline void psock_set_prog(struct bpf_prog **pprog,
				  struct bpf_prog *prog)
{
	prog = xchg(pprog, prog);
	if (prog)
		bpf_prog_put(prog);
}

static inline int psock_replace_prog(struct bpf_prog **pprog,
				     struct bpf_prog *prog,
				     struct bpf_prog *old)
{
	if (cmpxchg(pprog, old, prog) != old)
		return -ENOENT;

	if (old)
		bpf_prog_put(old);

	return 0;
}

static inline void psock_progs_drop(struct sk_psock_progs *progs)
{
	psock_set_prog(&progs->msg_parser, NULL);
	psock_set_prog(&progs->stream_parser, NULL);
	psock_set_prog(&progs->stream_verdict, NULL);
	psock_set_prog(&progs->skb_verdict, NULL);
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb);

static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
{
	if (!psock)
		return false;
	return !!psock->saved_data_ready;
}

static inline bool sk_is_udp(const struct sock *sk)
{
	return sk->sk_type == SOCK_DGRAM &&
	       sk->sk_protocol == IPPROTO_UDP;
}

#if IS_ENABLED(CONFIG_NET_SOCK_MSG)

#define BPF_F_STRPARSER	(1UL << 1)

/* We only have two bits so far. */
#define BPF_F_PTR_MASK ~(BPF_F_INGRESS | BPF_F_STRPARSER)
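/* skb->_sk_redir carries the redirect target socket pointer with the
 * BPF_F_INGRESS and BPF_F_STRPARSER flags packed into its low bits;
 * BPF_F_PTR_MASK strips the flags to recover the pointer.
 */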

static inline bool skb_bpf_strparser(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_STRPARSER;
}

static inline void skb_bpf_set_strparser(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_STRPARSER;
}

static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}

static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}

static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	skb->_sk_redir = (unsigned long)sk_redir;
	if (ingress)
		skb->_sk_redir |= BPF_F_INGRESS;
}

static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return (struct sock *)(sk_redir & BPF_F_PTR_MASK);
}

static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */