1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
3 
4 #include <linux/skmsg.h>
5 #include <linux/skbuff.h>
6 #include <linux/scatterlist.h>
7 
8 #include <net/sock.h>
9 #include <net/tcp.h>
10 #include <net/tls.h>
11 
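/* msg->sg.data is used as a ring: start and end are element indices that
 * wrap around, so end < start is a valid state once the ring has wrapped.
 * Small worked example (indices only): with start = 5 and end = 2 the
 * occupied slots are 5, 6, ... up to the last slot, then 0 and 1.
 * Coalescing into the element just before end is only allowed while
 * elem_first_coalesce still lies inside that occupied region, which is
 * what the two range checks below test for the unwrapped and the wrapped
 * case respectively.
 */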
12 static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
13 {
14 	if (msg->sg.end > msg->sg.start &&
15 	    elem_first_coalesce < msg->sg.end)
16 		return true;
17 
18 	if (msg->sg.end < msg->sg.start &&
19 	    (elem_first_coalesce > msg->sg.start ||
20 	     elem_first_coalesce < msg->sg.end))
21 		return true;
22 
23 	return false;
24 }
25 
26 int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
27 		 int elem_first_coalesce)
28 {
29 	struct page_frag *pfrag = sk_page_frag(sk);
30 	int ret = 0;
31 
32 	len -= msg->sg.size;
33 	while (len > 0) {
34 		struct scatterlist *sge;
35 		u32 orig_offset;
36 		int use, i;
37 
38 		if (!sk_page_frag_refill(sk, pfrag))
39 			return -ENOMEM;
40 
41 		orig_offset = pfrag->offset;
42 		use = min_t(int, len, pfrag->size - orig_offset);
43 		if (!sk_wmem_schedule(sk, use))
44 			return -ENOMEM;
45 
46 		i = msg->sg.end;
47 		sk_msg_iter_var_prev(i);
48 		sge = &msg->sg.data[i];
49 
50 		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
51 		    sg_page(sge) == pfrag->page &&
52 		    sge->offset + sge->length == orig_offset) {
53 			sge->length += use;
54 		} else {
55 			if (sk_msg_full(msg)) {
56 				ret = -ENOSPC;
57 				break;
58 			}
59 
60 			sge = &msg->sg.data[msg->sg.end];
61 			sg_unmark_end(sge);
62 			sg_set_page(sge, pfrag->page, use, orig_offset);
63 			get_page(pfrag->page);
64 			sk_msg_iter_next(msg, end);
65 		}
66 
67 		sk_mem_charge(sk, use);
68 		msg->sg.size += use;
69 		pfrag->offset += use;
70 		len -= use;
71 	}
72 
73 	return ret;
74 }
75 EXPORT_SYMBOL_GPL(sk_msg_alloc);
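/* Illustrative caller pattern (a sketch, not taken from this file): a
 * sendmsg hook typically grows the sk_msg first and then copies user data
 * into it, trimming back to the original size if the copy fails. The
 * locals (osize, copy, from) are hypothetical.
 *
 *	osize = msg->sg.size;
 *	err = sk_msg_alloc(sk, msg, osize + copy, msg->sg.end - 1);
 *	if (err)
 *		goto wait_for_memory;	(-ENOMEM, or -ENOSPC when the ring is full)
 *	err = sk_msg_memcopy_from_iter(sk, from, msg, copy);
 *	if (err < 0)
 *		sk_msg_trim(sk, msg, osize);
 */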
76 
77 int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
78 		 u32 off, u32 len)
79 {
80 	int i = src->sg.start;
81 	struct scatterlist *sge = sk_msg_elem(src, i);
82 	struct scatterlist *sgd = NULL;
83 	u32 sge_len, sge_off;
84 
85 	while (off) {
86 		if (sge->length > off)
87 			break;
88 		off -= sge->length;
89 		sk_msg_iter_var_next(i);
90 		if (i == src->sg.end && off)
91 			return -ENOSPC;
92 		sge = sk_msg_elem(src, i);
93 	}
94 
95 	while (len) {
96 		sge_len = sge->length - off;
97 		if (sge_len > len)
98 			sge_len = len;
99 
100 		if (dst->sg.end)
101 			sgd = sk_msg_elem(dst, dst->sg.end - 1);
102 
103 		if (sgd &&
104 		    (sg_page(sge) == sg_page(sgd)) &&
105 		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
106 			sgd->length += sge_len;
107 			dst->sg.size += sge_len;
108 		} else if (!sk_msg_full(dst)) {
109 			sge_off = sge->offset + off;
110 			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
111 		} else {
112 			return -ENOSPC;
113 		}
114 
115 		off = 0;
116 		len -= sge_len;
117 		sk_mem_charge(sk, sge_len);
118 		sk_msg_iter_var_next(i);
119 		if (i == src->sg.end && len)
120 			return -ENOSPC;
121 		sge = sk_msg_elem(src, i);
122 	}
123 
124 	return 0;
125 }
126 EXPORT_SYMBOL_GPL(sk_msg_clone);
127 
128 void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
129 {
130 	int i = msg->sg.start;
131 
132 	do {
133 		struct scatterlist *sge = sk_msg_elem(msg, i);
134 
135 		if (bytes < sge->length) {
136 			sge->length -= bytes;
137 			sge->offset += bytes;
138 			sk_mem_uncharge(sk, bytes);
139 			break;
140 		}
141 
142 		sk_mem_uncharge(sk, sge->length);
143 		bytes -= sge->length;
144 		sge->length = 0;
145 		sge->offset = 0;
146 		sk_msg_iter_var_next(i);
147 	} while (bytes && i != msg->sg.end);
148 	msg->sg.start = i;
149 }
150 EXPORT_SYMBOL_GPL(sk_msg_return_zero);
151 
152 void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
153 {
154 	int i = msg->sg.start;
155 
156 	do {
157 		struct scatterlist *sge = &msg->sg.data[i];
158 		int uncharge = (bytes < sge->length) ? bytes : sge->length;
159 
160 		sk_mem_uncharge(sk, uncharge);
161 		bytes -= uncharge;
162 		sk_msg_iter_var_next(i);
163 	} while (i != msg->sg.end);
164 }
165 EXPORT_SYMBOL_GPL(sk_msg_return);
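/* Note the split between the two return helpers above: sk_msg_return_zero()
 * uncharges the socket and also consumes the elements (lengths zeroed,
 * msg->sg.start advanced), while sk_msg_return() only gives the memory
 * accounting back and leaves the scatterlist itself untouched.
 */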
166 
167 static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
168 			    bool charge)
169 {
170 	struct scatterlist *sge = sk_msg_elem(msg, i);
171 	u32 len = sge->length;
172 
173 	/* When the skb owns the memory we free it from the consume_skb() path. */
174 	if (!msg->skb) {
175 		if (charge)
176 			sk_mem_uncharge(sk, len);
177 		put_page(sg_page(sge));
178 	}
179 	memset(sge, 0, sizeof(*sge));
180 	return len;
181 }
182 
183 static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
184 			 bool charge)
185 {
186 	struct scatterlist *sge = sk_msg_elem(msg, i);
187 	int freed = 0;
188 
189 	while (msg->sg.size) {
190 		msg->sg.size -= sge->length;
191 		freed += sk_msg_free_elem(sk, msg, i, charge);
192 		sk_msg_iter_var_next(i);
193 		sk_msg_check_to_free(msg, i, msg->sg.size);
194 		sge = sk_msg_elem(msg, i);
195 	}
196 	consume_skb(msg->skb);
197 	sk_msg_init(msg);
198 	return freed;
199 }
200 
201 int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
202 {
203 	return __sk_msg_free(sk, msg, msg->sg.start, false);
204 }
205 EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);
206 
207 int sk_msg_free(struct sock *sk, struct sk_msg *msg)
208 {
209 	return __sk_msg_free(sk, msg, msg->sg.start, true);
210 }
211 EXPORT_SYMBOL_GPL(sk_msg_free);
212 
213 static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
214 				  u32 bytes, bool charge)
215 {
216 	struct scatterlist *sge;
217 	u32 i = msg->sg.start;
218 
219 	while (bytes) {
220 		sge = sk_msg_elem(msg, i);
221 		if (!sge->length)
222 			break;
223 		if (bytes < sge->length) {
224 			if (charge)
225 				sk_mem_uncharge(sk, bytes);
226 			sge->length -= bytes;
227 			sge->offset += bytes;
228 			msg->sg.size -= bytes;
229 			break;
230 		}
231 
232 		msg->sg.size -= sge->length;
233 		bytes -= sge->length;
234 		sk_msg_free_elem(sk, msg, i, charge);
235 		sk_msg_iter_var_next(i);
236 		sk_msg_check_to_free(msg, i, bytes);
237 	}
238 	msg->sg.start = i;
239 }
240 
241 void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
242 {
243 	__sk_msg_free_partial(sk, msg, bytes, true);
244 }
245 EXPORT_SYMBOL_GPL(sk_msg_free_partial);
246 
247 void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
248 				  u32 bytes)
249 {
250 	__sk_msg_free_partial(sk, msg, bytes, false);
251 }
252 
253 void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
254 {
255 	int trim = msg->sg.size - len;
256 	u32 i = msg->sg.end;
257 
258 	if (trim <= 0) {
259 		WARN_ON(trim < 0);
260 		return;
261 	}
262 
263 	sk_msg_iter_var_prev(i);
264 	msg->sg.size = len;
265 	while (msg->sg.data[i].length &&
266 	       trim >= msg->sg.data[i].length) {
267 		trim -= msg->sg.data[i].length;
268 		sk_msg_free_elem(sk, msg, i, true);
269 		sk_msg_iter_var_prev(i);
270 		if (!trim)
271 			goto out;
272 	}
273 
274 	msg->sg.data[i].length -= trim;
275 	sk_mem_uncharge(sk, trim);
276 	/* Adjust copybreak if it falls into the trimmed part of last buf */
277 	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
278 		msg->sg.copybreak = msg->sg.data[i].length;
279 out:
280 	sk_msg_iter_var_next(i);
281 	msg->sg.end = i;
282 
283 	/* If we trim data a full sg elem before the curr pointer, update
284 	 * copybreak and curr so that any future copy operations
285 	 * start at the new copy location.
286 	 * However, trimmed data that has not yet been used in a copy op
287 	 * does not require an update.
288 	 */
289 	if (!msg->sg.size) {
290 		msg->sg.curr = msg->sg.start;
291 		msg->sg.copybreak = 0;
292 	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
293 		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
294 		sk_msg_iter_var_prev(i);
295 		msg->sg.curr = i;
296 		msg->sg.copybreak = msg->sg.data[i].length;
297 	}
298 }
299 EXPORT_SYMBOL_GPL(sk_msg_trim);
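/* Note: the len argument to sk_msg_trim() is the target size, not the number
 * of bytes to drop. E.g. sk_msg_trim(sk, msg, msg->sg.size - 10) removes the
 * last 10 bytes (an illustrative call, not taken from this file).
 */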
300 
301 int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
302 			      struct sk_msg *msg, u32 bytes)
303 {
304 	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
305 	const int to_max_pages = MAX_MSG_FRAGS;
306 	struct page *pages[MAX_MSG_FRAGS];
307 	ssize_t orig, copied, use, offset;
308 
309 	orig = msg->sg.size;
310 	while (bytes > 0) {
311 		i = 0;
312 		maxpages = to_max_pages - num_elems;
313 		if (maxpages == 0) {
314 			ret = -EFAULT;
315 			goto out;
316 		}
317 
318 		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
319 					    &offset);
320 		if (copied <= 0) {
321 			ret = -EFAULT;
322 			goto out;
323 		}
324 
325 		iov_iter_advance(from, copied);
326 		bytes -= copied;
327 		msg->sg.size += copied;
328 
329 		while (copied) {
330 			use = min_t(int, copied, PAGE_SIZE - offset);
331 			sg_set_page(&msg->sg.data[msg->sg.end],
332 				    pages[i], use, offset);
333 			sg_unmark_end(&msg->sg.data[msg->sg.end]);
334 			sk_mem_charge(sk, use);
335 
336 			offset = 0;
337 			copied -= use;
338 			sk_msg_iter_next(msg, end);
339 			num_elems++;
340 			i++;
341 		}
342 		/* When zerocopy is mixed with sk_msg_*copy* operations we
343 		 * may have a copybreak set; in that case clear it and prefer
344 		 * the zerocopy remainder when possible.
345 		 */
346 		msg->sg.copybreak = 0;
347 		msg->sg.curr = msg->sg.end;
348 	}
349 out:
350 	/* Revert iov_iter updates; msg will need to use 'trim' later if it
351 	 * also needs to be cleared.
352 	 */
353 	if (ret)
354 		iov_iter_revert(from, msg->sg.size - orig);
355 	return ret;
356 }
357 EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
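/* On failure the iterator has already been reverted above, so a caller that
 * wants a copy fallback can simply trim the msg back to its original size and
 * retry with sk_msg_alloc() + sk_msg_memcopy_from_iter(). This is only an
 * illustrative recovery pattern; the exact fallback is up to the caller.
 */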
358 
359 int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
360 			     struct sk_msg *msg, u32 bytes)
361 {
362 	int ret = -ENOSPC, i = msg->sg.curr;
363 	struct scatterlist *sge;
364 	u32 copy, buf_size;
365 	void *to;
366 
367 	do {
368 		sge = sk_msg_elem(msg, i);
369 		/* This is possible if a trim operation shrunk the buffer */
370 		if (msg->sg.copybreak >= sge->length) {
371 			msg->sg.copybreak = 0;
372 			sk_msg_iter_var_next(i);
373 			if (i == msg->sg.end)
374 				break;
375 			sge = sk_msg_elem(msg, i);
376 		}
377 
378 		buf_size = sge->length - msg->sg.copybreak;
379 		copy = (buf_size > bytes) ? bytes : buf_size;
380 		to = sg_virt(sge) + msg->sg.copybreak;
381 		msg->sg.copybreak += copy;
382 		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
383 			ret = copy_from_iter_nocache(to, copy, from);
384 		else
385 			ret = copy_from_iter(to, copy, from);
386 		if (ret != copy) {
387 			ret = -EFAULT;
388 			goto out;
389 		}
390 		bytes -= copy;
391 		if (!bytes)
392 			break;
393 		msg->sg.copybreak = 0;
394 		sk_msg_iter_var_next(i);
395 	} while (i != msg->sg.end);
396 out:
397 	msg->sg.curr = i;
398 	return ret;
399 }
400 EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
401 
402 /* Receive sk_msg from psock->ingress_msg to @msg. */
403 int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
404 		   int len, int flags)
405 {
406 	struct iov_iter *iter = &msg->msg_iter;
407 	int peek = flags & MSG_PEEK;
408 	struct sk_msg *msg_rx;
409 	int i, copied = 0;
410 
411 	msg_rx = sk_psock_peek_msg(psock);
412 	while (copied != len) {
413 		struct scatterlist *sge;
414 
415 		if (unlikely(!msg_rx))
416 			break;
417 
418 		i = msg_rx->sg.start;
419 		do {
420 			struct page *page;
421 			int copy;
422 
423 			sge = sk_msg_elem(msg_rx, i);
424 			copy = sge->length;
425 			page = sg_page(sge);
426 			if (copied + copy > len)
427 				copy = len - copied;
428 			copy = copy_page_to_iter(page, sge->offset, copy, iter);
429 			if (!copy)
430 				return copied ? copied : -EFAULT;
431 
432 			copied += copy;
433 			if (likely(!peek)) {
434 				sge->offset += copy;
435 				sge->length -= copy;
436 				if (!msg_rx->skb)
437 					sk_mem_uncharge(sk, copy);
438 				msg_rx->sg.size -= copy;
439 
440 				if (!sge->length) {
441 					sk_msg_iter_var_next(i);
442 					if (!msg_rx->skb)
443 						put_page(page);
444 				}
445 			} else {
446 				/* Let's not optimize the peek case: if copy_page_to_iter
447 				 * didn't copy the entire length, just break.
448 				 */
449 				if (copy != sge->length)
450 					return copied;
451 				sk_msg_iter_var_next(i);
452 			}
453 
454 			if (copied == len)
455 				break;
456 		} while (i != msg_rx->sg.end);
457 
458 		if (unlikely(peek)) {
459 			msg_rx = sk_psock_next_msg(psock, msg_rx);
460 			if (!msg_rx)
461 				break;
462 			continue;
463 		}
464 
465 		msg_rx->sg.start = i;
466 		if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
467 			msg_rx = sk_psock_dequeue_msg(psock);
468 			kfree_sk_msg(msg_rx);
469 		}
470 		msg_rx = sk_psock_peek_msg(psock);
471 	}
472 
473 	return copied;
474 }
475 EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
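/* Illustrative sketch of a protocol recvmsg hook driving the helper above
 * (hypothetical wiring, not taken from this file):
 *
 *	psock = sk_psock_get(sk);
 *	if (psock) {
 *		lock_sock(sk);
 *		copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
 *		release_sock(sk);
 *		sk_psock_put(sk, psock);
 *	}
 */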
476 
477 bool sk_msg_is_readable(struct sock *sk)
478 {
479 	struct sk_psock *psock;
480 	bool empty = true;
481 
482 	rcu_read_lock();
483 	psock = sk_psock(sk);
484 	if (likely(psock))
485 		empty = list_empty(&psock->ingress_msg);
486 	rcu_read_unlock();
487 	return !empty;
488 }
489 EXPORT_SYMBOL_GPL(sk_msg_is_readable);
490 
491 static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
492 						  struct sk_buff *skb)
493 {
494 	struct sk_msg *msg;
495 
496 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
497 		return NULL;
498 
499 	if (!sk_rmem_schedule(sk, skb, skb->truesize))
500 		return NULL;
501 
502 	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
503 	if (unlikely(!msg))
504 		return NULL;
505 
506 	sk_msg_init(msg);
507 	return msg;
508 }
509 
510 static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
511 					struct sk_psock *psock,
512 					struct sock *sk,
513 					struct sk_msg *msg)
514 {
515 	int num_sge, copied;
516 
517 	/* skb_linearize() may fail with ENOMEM, but let's simply try again
518 	 * later if this happens. Under memory pressure we don't want to
519 	 * drop the skb. We need to linearize the skb so that the mapping
520 	 * in skb_to_sgvec cannot error.
521 	 */
522 	if (skb_linearize(skb))
523 		return -EAGAIN;
524 	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
525 	if (unlikely(num_sge < 0))
526 		return num_sge;
527 
528 	copied = skb->len;
529 	msg->sg.start = 0;
530 	msg->sg.size = copied;
531 	msg->sg.end = num_sge;
532 	msg->skb = skb;
533 
534 	sk_psock_queue_msg(psock, msg);
535 	sk_psock_data_ready(sk, psock);
536 	return copied;
537 }
538 
539 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);
540 
541 static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
542 {
543 	struct sock *sk = psock->sk;
544 	struct sk_msg *msg;
545 	int err;
546 
547 	/* If we are receiving on the same sock skb->sk is already assigned,
548 	 * so skip the memory accounting and owner transition since they are
549 	 * already set correctly.
550 	 */
551 	if (unlikely(skb->sk == sk))
552 		return sk_psock_skb_ingress_self(psock, skb);
553 	msg = sk_psock_create_ingress_msg(sk, skb);
554 	if (!msg)
555 		return -EAGAIN;
556 
557 	/* This will transition ownership of the data from the socket where
558 	 * the BPF program was run initiating the redirect to the socket
559 	 * we will eventually receive this data on. The data will be released
560 	 * via consume_skb() in the recvmsg path after it has been copied
561 	 * into user buffers.
562 	 */
563 	skb_set_owner_r(skb, sk);
564 	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
565 	if (err < 0)
566 		kfree(msg);
567 	return err;
568 }
569 
570 /* Puts an skb on the ingress queue of the socket already assigned to the
571  * skb. In this case we do not need to check memory limits or skb_set_owner_r
572  * because the skb is already accounted for here.
573  */
574 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
575 {
576 	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
577 	struct sock *sk = psock->sk;
578 	int err;
579 
580 	if (unlikely(!msg))
581 		return -EAGAIN;
582 	sk_msg_init(msg);
583 	skb_set_owner_r(skb, sk);
584 	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
585 	if (err < 0)
586 		kfree(msg);
587 	return err;
588 }
589 
590 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
591 			       u32 off, u32 len, bool ingress)
592 {
593 	if (!ingress) {
594 		if (!sock_writeable(psock->sk))
595 			return -EAGAIN;
596 		return skb_send_sock(psock->sk, skb, off, len);
597 	}
598 	return sk_psock_skb_ingress(psock, skb);
599 }
600 
601 static void sk_psock_skb_state(struct sk_psock *psock,
602 			       struct sk_psock_work_state *state,
603 			       struct sk_buff *skb,
604 			       int len, int off)
605 {
606 	spin_lock_bh(&psock->ingress_lock);
607 	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
608 		state->skb = skb;
609 		state->len = len;
610 		state->off = off;
611 	} else {
612 		sock_drop(psock->sk, skb);
613 	}
614 	spin_unlock_bh(&psock->ingress_lock);
615 }
616 
617 static void sk_psock_backlog(struct work_struct *work)
618 {
619 	struct sk_psock *psock = container_of(work, struct sk_psock, work);
620 	struct sk_psock_work_state *state = &psock->work_state;
621 	struct sk_buff *skb = NULL;
622 	bool ingress;
623 	u32 len, off;
624 	int ret;
625 
626 	mutex_lock(&psock->work_mutex);
627 	if (unlikely(state->skb)) {
628 		spin_lock_bh(&psock->ingress_lock);
629 		skb = state->skb;
630 		len = state->len;
631 		off = state->off;
632 		state->skb = NULL;
633 		spin_unlock_bh(&psock->ingress_lock);
634 	}
635 	if (skb)
636 		goto start;
637 
638 	while ((skb = skb_dequeue(&psock->ingress_skb))) {
639 		len = skb->len;
640 		off = 0;
641 start:
642 		ingress = skb_bpf_ingress(skb);
643 		skb_bpf_redirect_clear(skb);
644 		do {
645 			ret = -EIO;
646 			if (!sock_flag(psock->sk, SOCK_DEAD))
647 				ret = sk_psock_handle_skb(psock, skb, off,
648 							  len, ingress);
649 			if (ret <= 0) {
650 				if (ret == -EAGAIN) {
651 					sk_psock_skb_state(psock, state, skb,
652 							   len, off);
653 					goto end;
654 				}
655 				/* Hard errors break pipe and stop xmit. */
656 				sk_psock_report_error(psock, ret ? -ret : EPIPE);
657 				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
658 				sock_drop(psock->sk, skb);
659 				goto end;
660 			}
661 			off += ret;
662 			len -= ret;
663 		} while (len);
664 
665 		if (!ingress)
666 			kfree_skb(skb);
667 	}
668 end:
669 	mutex_unlock(&psock->work_mutex);
670 }
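/* The backlog above resumes partially handled skbs: when
 * sk_psock_handle_skb() returns -EAGAIN the current skb together with its
 * remaining (len, off) is parked in psock->work_state via
 * sk_psock_skb_state(), and the next run of the work item picks it up again
 * at the "start" label before draining ingress_skb any further.
 */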
671 
672 struct sk_psock *sk_psock_init(struct sock *sk, int node)
673 {
674 	struct sk_psock *psock;
675 	struct proto *prot;
676 
677 	write_lock_bh(&sk->sk_callback_lock);
678 
679 	if (sk->sk_user_data) {
680 		psock = ERR_PTR(-EBUSY);
681 		goto out;
682 	}
683 
684 	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
685 	if (!psock) {
686 		psock = ERR_PTR(-ENOMEM);
687 		goto out;
688 	}
689 
690 	prot = READ_ONCE(sk->sk_prot);
691 	psock->sk = sk;
692 	psock->eval = __SK_NONE;
693 	psock->sk_proto = prot;
694 	psock->saved_unhash = prot->unhash;
695 	psock->saved_close = prot->close;
696 	psock->saved_write_space = sk->sk_write_space;
697 
698 	INIT_LIST_HEAD(&psock->link);
699 	spin_lock_init(&psock->link_lock);
700 
701 	INIT_WORK(&psock->work, sk_psock_backlog);
702 	mutex_init(&psock->work_mutex);
703 	INIT_LIST_HEAD(&psock->ingress_msg);
704 	spin_lock_init(&psock->ingress_lock);
705 	skb_queue_head_init(&psock->ingress_skb);
706 
707 	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
708 	refcount_set(&psock->refcnt, 1);
709 
710 	rcu_assign_sk_user_data_nocopy(sk, psock);
711 	sock_hold(sk);
712 
713 out:
714 	write_unlock_bh(&sk->sk_callback_lock);
715 	return psock;
716 }
717 EXPORT_SYMBOL_GPL(sk_psock_init);
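/* Illustrative attach sequence (a sketch of how a sockmap update might use
 * the helper above; error handling trimmed):
 *
 *	psock = sk_psock_init(sk, NUMA_NO_NODE);
 *	if (IS_ERR(psock))
 *		return PTR_ERR(psock);
 */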
718 
719 struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
720 {
721 	struct sk_psock_link *link;
722 
723 	spin_lock_bh(&psock->link_lock);
724 	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
725 					list);
726 	if (link)
727 		list_del(&link->list);
728 	spin_unlock_bh(&psock->link_lock);
729 	return link;
730 }
731 
732 static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
733 {
734 	struct sk_msg *msg, *tmp;
735 
736 	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
737 		list_del(&msg->list);
738 		sk_msg_free(psock->sk, msg);
739 		kfree(msg);
740 	}
741 }
742 
743 static void __sk_psock_zap_ingress(struct sk_psock *psock)
744 {
745 	struct sk_buff *skb;
746 
747 	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
748 		skb_bpf_redirect_clear(skb);
749 		sock_drop(psock->sk, skb);
750 	}
751 	kfree_skb(psock->work_state.skb);
752 	/* We null the skb here to ensure that calls to sk_psock_backlog
753 	 * do not pick up the freed skb.
754 	 */
755 	psock->work_state.skb = NULL;
756 	__sk_psock_purge_ingress_msg(psock);
757 }
758 
759 static void sk_psock_link_destroy(struct sk_psock *psock)
760 {
761 	struct sk_psock_link *link, *tmp;
762 
763 	list_for_each_entry_safe(link, tmp, &psock->link, list) {
764 		list_del(&link->list);
765 		sk_psock_free_link(link);
766 	}
767 }
768 
769 void sk_psock_stop(struct sk_psock *psock, bool wait)
770 {
771 	spin_lock_bh(&psock->ingress_lock);
772 	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
773 	sk_psock_cork_free(psock);
774 	__sk_psock_zap_ingress(psock);
775 	spin_unlock_bh(&psock->ingress_lock);
776 
777 	if (wait)
778 		cancel_work_sync(&psock->work);
779 }
780 
781 static void sk_psock_done_strp(struct sk_psock *psock);
782 
783 static void sk_psock_destroy(struct work_struct *work)
784 {
785 	struct sk_psock *psock = container_of(to_rcu_work(work),
786 					      struct sk_psock, rwork);
787 	/* No sk_callback_lock since already detached. */
788 
789 	sk_psock_done_strp(psock);
790 
791 	cancel_work_sync(&psock->work);
792 	mutex_destroy(&psock->work_mutex);
793 
794 	psock_progs_drop(&psock->progs);
795 
796 	sk_psock_link_destroy(psock);
797 	sk_psock_cork_free(psock);
798 
799 	if (psock->sk_redir)
800 		sock_put(psock->sk_redir);
801 	sock_put(psock->sk);
802 	kfree(psock);
803 }
804 
805 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
806 {
807 	write_lock_bh(&sk->sk_callback_lock);
808 	sk_psock_restore_proto(sk, psock);
809 	rcu_assign_sk_user_data(sk, NULL);
810 	if (psock->progs.stream_parser)
811 		sk_psock_stop_strp(sk, psock);
812 	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
813 		sk_psock_stop_verdict(sk, psock);
814 	write_unlock_bh(&sk->sk_callback_lock);
815 
816 	sk_psock_stop(psock, false);
817 
818 	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
819 	queue_rcu_work(system_wq, &psock->rwork);
820 }
821 EXPORT_SYMBOL_GPL(sk_psock_drop);
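/* Callers normally do not invoke sk_psock_drop() directly; they hold a
 * reference taken with sk_psock_get() and release it with sk_psock_put(),
 * which drops the psock once the last reference goes away. Destruction
 * itself is deferred to the RCU work queued above.
 */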
822 
823 static int sk_psock_map_verd(int verdict, bool redir)
824 {
825 	switch (verdict) {
826 	case SK_PASS:
827 		return redir ? __SK_REDIRECT : __SK_PASS;
828 	case SK_DROP:
829 	default:
830 		break;
831 	}
832 
833 	return __SK_DROP;
834 }
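/* Mapping performed above, (BPF verdict, redirect target set?) -> action:
 *
 *	SK_PASS + redirect target	-> __SK_REDIRECT
 *	SK_PASS, no target		-> __SK_PASS
 *	SK_DROP or anything else	-> __SK_DROP
 */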
835 
836 int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
837 			 struct sk_msg *msg)
838 {
839 	struct bpf_prog *prog;
840 	int ret;
841 
842 	rcu_read_lock();
843 	prog = READ_ONCE(psock->progs.msg_parser);
844 	if (unlikely(!prog)) {
845 		ret = __SK_PASS;
846 		goto out;
847 	}
848 
849 	sk_msg_compute_data_pointers(msg);
850 	msg->sk = sk;
851 	ret = bpf_prog_run_pin_on_cpu(prog, msg);
852 	ret = sk_psock_map_verd(ret, msg->sk_redir);
853 	psock->apply_bytes = msg->apply_bytes;
854 	if (ret == __SK_REDIRECT) {
855 		if (psock->sk_redir)
856 			sock_put(psock->sk_redir);
857 		psock->sk_redir = msg->sk_redir;
858 		if (!psock->sk_redir) {
859 			ret = __SK_DROP;
860 			goto out;
861 		}
862 		sock_hold(psock->sk_redir);
863 	}
864 out:
865 	rcu_read_unlock();
866 	return ret;
867 }
868 EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
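/* Sketch of how a sendmsg hook might consume the verdict from above
 * (illustrative only; the actual transmit paths are elided):
 *
 *	switch (sk_psock_msg_verdict(sk, psock, msg)) {
 *	case __SK_PASS:
 *		... transmit on this socket ...
 *		break;
 *	case __SK_REDIRECT:
 *		... send msg towards psock->sk_redir instead ...
 *		break;
 *	case __SK_DROP:
 *	default:
 *		sk_msg_free(sk, msg);
 *	}
 */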
869 
870 static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
871 {
872 	struct sk_psock *psock_other;
873 	struct sock *sk_other;
874 
875 	sk_other = skb_bpf_redirect_fetch(skb);
876 	/* This error indicates a buggy BPF program: it returned a redirect
877 	 * verdict, but then didn't set a redirect socket.
878 	 */
879 	if (unlikely(!sk_other)) {
880 		sock_drop(from->sk, skb);
881 		return -EIO;
882 	}
883 	psock_other = sk_psock(sk_other);
884 	/* This error indicates the socket is being torn down or had another
885 	 * error that caused the pipe to break. We can't send a packet on
886 	 * a socket that is in this state so we drop the skb.
887 	 */
888 	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
889 		skb_bpf_redirect_clear(skb);
890 		sock_drop(from->sk, skb);
891 		return -EIO;
892 	}
893 	spin_lock_bh(&psock_other->ingress_lock);
894 	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
895 		spin_unlock_bh(&psock_other->ingress_lock);
896 		skb_bpf_redirect_clear(skb);
897 		sock_drop(from->sk, skb);
898 		return -EIO;
899 	}
900 
901 	skb_queue_tail(&psock_other->ingress_skb, skb);
902 	schedule_work(&psock_other->work);
903 	spin_unlock_bh(&psock_other->ingress_lock);
904 	return 0;
905 }
906 
907 static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
908 				       struct sk_psock *from, int verdict)
909 {
910 	switch (verdict) {
911 	case __SK_REDIRECT:
912 		sk_psock_skb_redirect(from, skb);
913 		break;
914 	case __SK_PASS:
915 	case __SK_DROP:
916 	default:
917 		break;
918 	}
919 }
920 
921 int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
922 {
923 	struct bpf_prog *prog;
924 	int ret = __SK_PASS;
925 
926 	rcu_read_lock();
927 	prog = READ_ONCE(psock->progs.stream_verdict);
928 	if (likely(prog)) {
929 		skb->sk = psock->sk;
930 		skb_dst_drop(skb);
931 		skb_bpf_redirect_clear(skb);
932 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
933 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
934 		skb->sk = NULL;
935 	}
936 	sk_psock_tls_verdict_apply(skb, psock, ret);
937 	rcu_read_unlock();
938 	return ret;
939 }
940 EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
941 
942 static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
943 				  int verdict)
944 {
945 	struct sock *sk_other;
946 	int err = 0;
947 
948 	switch (verdict) {
949 	case __SK_PASS:
950 		err = -EIO;
951 		sk_other = psock->sk;
952 		if (sock_flag(sk_other, SOCK_DEAD) ||
953 		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
954 			goto out_free;
955 		}
956 
957 		skb_bpf_set_ingress(skb);
958 
959 		/* If the queue is empty then we can submit directly
960 		 * into the msg queue. If it's not empty we have to
961 		 * queue work, otherwise we may get OOO data. If
962 		 * sk_psock_skb_ingress() errors, that will be handled by
963 		 * retrying later from the workqueue.
964 		 */
965 		if (skb_queue_empty(&psock->ingress_skb)) {
966 			err = sk_psock_skb_ingress_self(psock, skb);
967 		}
968 		if (err < 0) {
969 			spin_lock_bh(&psock->ingress_lock);
970 			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
971 				skb_queue_tail(&psock->ingress_skb, skb);
972 				schedule_work(&psock->work);
973 				err = 0;
974 			}
975 			spin_unlock_bh(&psock->ingress_lock);
976 			if (err < 0) {
977 				skb_bpf_redirect_clear(skb);
978 				goto out_free;
979 			}
980 		}
981 		break;
982 	case __SK_REDIRECT:
983 		err = sk_psock_skb_redirect(psock, skb);
984 		break;
985 	case __SK_DROP:
986 	default:
987 out_free:
988 		sock_drop(psock->sk, skb);
989 	}
990 
991 	return err;
992 }
993 
994 static void sk_psock_write_space(struct sock *sk)
995 {
996 	struct sk_psock *psock;
997 	void (*write_space)(struct sock *sk) = NULL;
998 
999 	rcu_read_lock();
1000 	psock = sk_psock(sk);
1001 	if (likely(psock)) {
1002 		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
1003 			schedule_work(&psock->work);
1004 		write_space = psock->saved_write_space;
1005 	}
1006 	rcu_read_unlock();
1007 	if (write_space)
1008 		write_space(sk);
1009 }
1010 
1011 #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
1012 static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
1013 {
1014 	struct sk_psock *psock;
1015 	struct bpf_prog *prog;
1016 	int ret = __SK_DROP;
1017 	struct sock *sk;
1018 
1019 	rcu_read_lock();
1020 	sk = strp->sk;
1021 	psock = sk_psock(sk);
1022 	if (unlikely(!psock)) {
1023 		sock_drop(sk, skb);
1024 		goto out;
1025 	}
1026 	prog = READ_ONCE(psock->progs.stream_verdict);
1027 	if (likely(prog)) {
1028 		skb->sk = sk;
1029 		skb_dst_drop(skb);
1030 		skb_bpf_redirect_clear(skb);
1031 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1032 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1033 		skb->sk = NULL;
1034 	}
1035 	sk_psock_verdict_apply(psock, skb, ret);
1036 out:
1037 	rcu_read_unlock();
1038 }
1039 
1040 static int sk_psock_strp_read_done(struct strparser *strp, int err)
1041 {
1042 	return err;
1043 }
1044 
1045 static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
1046 {
1047 	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
1048 	struct bpf_prog *prog;
1049 	int ret = skb->len;
1050 
1051 	rcu_read_lock();
1052 	prog = READ_ONCE(psock->progs.stream_parser);
1053 	if (likely(prog)) {
1054 		skb->sk = psock->sk;
1055 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1056 		skb->sk = NULL;
1057 	}
1058 	rcu_read_unlock();
1059 	return ret;
1060 }
1061 
1062 /* Called with socket lock held. */
1063 static void sk_psock_strp_data_ready(struct sock *sk)
1064 {
1065 	struct sk_psock *psock;
1066 
1067 	rcu_read_lock();
1068 	psock = sk_psock(sk);
1069 	if (likely(psock)) {
1070 		if (tls_sw_has_ctx_rx(sk)) {
1071 			psock->saved_data_ready(sk);
1072 		} else {
1073 			write_lock_bh(&sk->sk_callback_lock);
1074 			strp_data_ready(&psock->strp);
1075 			write_unlock_bh(&sk->sk_callback_lock);
1076 		}
1077 	}
1078 	rcu_read_unlock();
1079 }
1080 
1081 int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
1082 {
1083 	static const struct strp_callbacks cb = {
1084 		.rcv_msg	= sk_psock_strp_read,
1085 		.read_sock_done	= sk_psock_strp_read_done,
1086 		.parse_msg	= sk_psock_strp_parse,
1087 	};
1088 
1089 	return strp_init(&psock->strp, sk, &cb);
1090 }
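/* Typical enable sequence (illustrative): sk_psock_init_strp() is called once
 * when a stream parser program is attached, and sk_psock_start_strp() below
 * then swaps in the strparser-driven sk_data_ready callback.
 */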
1091 
1092 void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
1093 {
1094 	if (psock->saved_data_ready)
1095 		return;
1096 
1097 	psock->saved_data_ready = sk->sk_data_ready;
1098 	sk->sk_data_ready = sk_psock_strp_data_ready;
1099 	sk->sk_write_space = sk_psock_write_space;
1100 }
1101 
1102 void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
1103 {
1104 	if (!psock->saved_data_ready)
1105 		return;
1106 
1107 	sk->sk_data_ready = psock->saved_data_ready;
1108 	psock->saved_data_ready = NULL;
1109 	strp_stop(&psock->strp);
1110 }
1111 
1112 static void sk_psock_done_strp(struct sk_psock *psock)
1113 {
1114 	/* Parser has been stopped */
1115 	if (psock->progs.stream_parser)
1116 		strp_done(&psock->strp);
1117 }
1118 #else
1119 static void sk_psock_done_strp(struct sk_psock *psock)
1120 {
1121 }
1122 #endif /* CONFIG_BPF_STREAM_PARSER */
1123 
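/* Verdict-only receive path (no strparser): sk_psock_verdict_data_ready()
 * below feeds incoming data through ->read_sock() into
 * sk_psock_verdict_recv(), which clones each skb, runs the stream/skb verdict
 * program and applies the result via sk_psock_verdict_apply().
 */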
1124 static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
1125 				 unsigned int offset, size_t orig_len)
1126 {
1127 	struct sock *sk = (struct sock *)desc->arg.data;
1128 	struct sk_psock *psock;
1129 	struct bpf_prog *prog;
1130 	int ret = __SK_DROP;
1131 	int len = skb->len;
1132 
1133 	/* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
1134 	skb = skb_clone(skb, GFP_ATOMIC);
1135 	if (!skb) {
1136 		desc->error = -ENOMEM;
1137 		return 0;
1138 	}
1139 
1140 	rcu_read_lock();
1141 	psock = sk_psock(sk);
1142 	if (unlikely(!psock)) {
1143 		len = 0;
1144 		sock_drop(sk, skb);
1145 		goto out;
1146 	}
1147 	prog = READ_ONCE(psock->progs.stream_verdict);
1148 	if (!prog)
1149 		prog = READ_ONCE(psock->progs.skb_verdict);
1150 	if (likely(prog)) {
1151 		skb->sk = sk;
1152 		skb_dst_drop(skb);
1153 		skb_bpf_redirect_clear(skb);
1154 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
1155 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
1156 		skb->sk = NULL;
1157 	}
1158 	if (sk_psock_verdict_apply(psock, skb, ret) < 0)
1159 		len = 0;
1160 out:
1161 	rcu_read_unlock();
1162 	return len;
1163 }
1164 
1165 static void sk_psock_verdict_data_ready(struct sock *sk)
1166 {
1167 	struct socket *sock = sk->sk_socket;
1168 	read_descriptor_t desc;
1169 
1170 	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
1171 		return;
1172 
1173 	desc.arg.data = sk;
1174 	desc.error = 0;
1175 	desc.count = 1;
1176 
1177 	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
1178 }
1179 
1180 void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
1181 {
1182 	if (psock->saved_data_ready)
1183 		return;
1184 
1185 	psock->saved_data_ready = sk->sk_data_ready;
1186 	sk->sk_data_ready = sk_psock_verdict_data_ready;
1187 	sk->sk_write_space = sk_psock_write_space;
1188 }
1189 
1190 void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
1191 {
1192 	if (!psock->saved_data_ready)
1193 		return;
1194 
1195 	sk->sk_data_ready = psock->saved_data_ready;
1196 	psock->saved_data_ready = NULL;
1197 }
1198