Lines matching +full:mux +full:-locked in net/kcm/kcmsock.c (Kernel Connection Multiplexor)
1 // SPDX-License-Identifier: GPL-2.0-only
44 return (struct kcm_tx_msg *)skb->cb; in kcm_tx_msg()
49 csk->sk_err = EPIPE; in report_csk_error()
56 struct sock *csk = psock->sk; in kcm_abort_tx_psock()
57 struct kcm_mux *mux = psock->mux; in kcm_abort_tx_psock() local
61 spin_lock_bh(&mux->lock); in kcm_abort_tx_psock()
63 if (psock->tx_stopped) { in kcm_abort_tx_psock()
64 spin_unlock_bh(&mux->lock); in kcm_abort_tx_psock()
68 psock->tx_stopped = 1; in kcm_abort_tx_psock()
69 KCM_STATS_INCR(psock->stats.tx_aborts); in kcm_abort_tx_psock()
71 if (!psock->tx_kcm) { in kcm_abort_tx_psock()
73 list_del(&psock->psock_avail_list); in kcm_abort_tx_psock()
82 queue_work(kcm_wq, &psock->tx_kcm->tx_work); in kcm_abort_tx_psock()
85 spin_unlock_bh(&mux->lock); in kcm_abort_tx_psock()
91 /* RX mux lock held. */
92 static void kcm_update_rx_mux_stats(struct kcm_mux *mux, in kcm_update_rx_mux_stats() argument
95 STRP_STATS_ADD(mux->stats.rx_bytes, in kcm_update_rx_mux_stats()
96 psock->strp.stats.bytes - in kcm_update_rx_mux_stats()
97 psock->saved_rx_bytes); in kcm_update_rx_mux_stats()
98 mux->stats.rx_msgs += in kcm_update_rx_mux_stats()
99 psock->strp.stats.msgs - psock->saved_rx_msgs; in kcm_update_rx_mux_stats()
100 psock->saved_rx_msgs = psock->strp.stats.msgs; in kcm_update_rx_mux_stats()
101 psock->saved_rx_bytes = psock->strp.stats.bytes; in kcm_update_rx_mux_stats()
104 static void kcm_update_tx_mux_stats(struct kcm_mux *mux, in kcm_update_tx_mux_stats() argument
107 KCM_STATS_ADD(mux->stats.tx_bytes, in kcm_update_tx_mux_stats()
108 psock->stats.tx_bytes - psock->saved_tx_bytes); in kcm_update_tx_mux_stats()
109 mux->stats.tx_msgs += in kcm_update_tx_mux_stats()
110 psock->stats.tx_msgs - psock->saved_tx_msgs; in kcm_update_tx_mux_stats()
111 psock->saved_tx_msgs = psock->stats.tx_msgs; in kcm_update_tx_mux_stats()
112 psock->saved_tx_bytes = psock->stats.tx_bytes; in kcm_update_tx_mux_stats()
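
Note: the two stats helpers above fold per-psock counters into the mux totals by adding only the delta since the last snapshot (the saved_* fields), so the per-psock counters can keep growing monotonically without being double counted. A minimal userspace sketch of the same snapshot/delta pattern, with hypothetical names:

    #include <stdio.h>

    /* Hypothetical stand-ins for the psock/mux counters used above. */
    struct psock_stats { unsigned long tx_msgs, tx_bytes; };

    struct psock {
        struct psock_stats stats;      /* grows monotonically */
        unsigned long saved_tx_msgs;   /* last values folded into the mux */
        unsigned long saved_tx_bytes;
    };

    struct mux_stats { unsigned long tx_msgs, tx_bytes; };

    /* Add only the delta since the previous snapshot, then advance the
     * snapshot; calling this twice in a row adds nothing the second time. */
    static void fold_tx_stats(struct mux_stats *mux, struct psock *p)
    {
        mux->tx_msgs  += p->stats.tx_msgs  - p->saved_tx_msgs;
        mux->tx_bytes += p->stats.tx_bytes - p->saved_tx_bytes;
        p->saved_tx_msgs  = p->stats.tx_msgs;
        p->saved_tx_bytes = p->stats.tx_bytes;
    }

    int main(void)
    {
        struct mux_stats m = { 0, 0 };
        struct psock p = { .stats = { .tx_msgs = 3, .tx_bytes = 900 } };

        fold_tx_stats(&m, &p);
        fold_tx_stats(&m, &p);  /* no-op: nothing new since the snapshot */
        printf("msgs=%lu bytes=%lu\n", m.tx_msgs, m.tx_bytes);  /* 3, 900 */
        return 0;
    }

In the kernel code this fold runs with the relevant mux lock held, which is what makes the read-modify-write of the saved_* fields safe.
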
117 /* KCM is ready to receive messages on its queue-- either the KCM is new or
119 * pending ready messages on a psock. RX mux lock held.
123 struct kcm_mux *mux = kcm->mux; in kcm_rcv_ready() local
127 if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled)) in kcm_rcv_ready()
130 while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) { in kcm_rcv_ready()
131 if (kcm_queue_rcv_skb(&kcm->sk, skb)) { in kcm_rcv_ready()
133 skb_queue_head(&mux->rx_hold_queue, skb); in kcm_rcv_ready()
134 WARN_ON(!sk_rmem_alloc_get(&kcm->sk)); in kcm_rcv_ready()
139 while (!list_empty(&mux->psocks_ready)) { in kcm_rcv_ready()
140 psock = list_first_entry(&mux->psocks_ready, struct kcm_psock, in kcm_rcv_ready()
143 if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) { in kcm_rcv_ready()
145 WARN_ON(!sk_rmem_alloc_get(&kcm->sk)); in kcm_rcv_ready()
152 list_del(&psock->psock_ready_list); in kcm_rcv_ready()
153 psock->ready_rx_msg = NULL; in kcm_rcv_ready()
157 strp_unpause(&psock->strp); in kcm_rcv_ready()
158 strp_check_rcv(&psock->strp); in kcm_rcv_ready()
162 list_add_tail(&kcm->wait_rx_list, in kcm_rcv_ready()
163 &kcm->mux->kcm_rx_waiters); in kcm_rcv_ready()
164 kcm->rx_wait = true; in kcm_rcv_ready()
169 struct sock *sk = skb->sk; in kcm_rfree()
171 struct kcm_mux *mux = kcm->mux; in kcm_rfree() local
172 unsigned int len = skb->truesize; in kcm_rfree()
175 atomic_sub(len, &sk->sk_rmem_alloc); in kcm_rfree()
180 if (!kcm->rx_wait && !kcm->rx_psock && in kcm_rfree()
181 sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) { in kcm_rfree()
182 spin_lock_bh(&mux->rx_lock); in kcm_rfree()
184 spin_unlock_bh(&mux->rx_lock); in kcm_rfree()
190 struct sk_buff_head *list = &sk->sk_receive_queue; in kcm_queue_rcv_skb()
192 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in kcm_queue_rcv_skb()
193 return -ENOMEM; in kcm_queue_rcv_skb()
195 if (!sk_rmem_schedule(sk, skb, skb->truesize)) in kcm_queue_rcv_skb()
196 return -ENOBUFS; in kcm_queue_rcv_skb()
198 skb->dev = NULL; in kcm_queue_rcv_skb()
201 skb->sk = sk; in kcm_queue_rcv_skb()
202 skb->destructor = kcm_rfree; in kcm_queue_rcv_skb()
203 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in kcm_queue_rcv_skb()
204 sk_mem_charge(sk, skb->truesize); in kcm_queue_rcv_skb()
209 sk->sk_data_ready(sk); in kcm_queue_rcv_skb()
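
Note: kcm_queue_rcv_skb() and kcm_rfree() above are a charge/uncharge pair. Enqueueing charges skb->truesize against the socket's receive allocation and installs kcm_rfree as the skb destructor, so the charge is dropped wherever the skb is finally freed, and (per the kcm_rfree lines) reception can be re-enabled once usage falls below sk_rcvlowat. A minimal sketch of the destructor idea, all names hypothetical:

    #include <stddef.h>

    struct buf {
        size_t truesize;                        /* full memory cost */
        void (*destructor)(struct buf *, size_t *rmem);
    };

    /* Uncharge when the buffer is freed, no matter who frees it. */
    static void buf_rfree(struct buf *b, size_t *rmem)
    {
        *rmem -= b->truesize;
    }

    static int queue_rcv(struct buf *b, size_t *rmem, size_t rcvbuf)
    {
        if (*rmem >= rcvbuf)        /* receive budget already exhausted */
            return -1;

        b->destructor = buf_rfree;  /* the uncharge travels with the buf */
        *rmem += b->truesize;       /* charge the receiving socket */
        return 0;
    }
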
216 * RX mux lock held.
218 static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head) in requeue_rx_msgs() argument
225 skb->destructor = sock_rfree; in requeue_rx_msgs()
228 if (list_empty(&mux->kcm_rx_waiters)) { in requeue_rx_msgs()
229 skb_queue_tail(&mux->rx_hold_queue, skb); in requeue_rx_msgs()
233 kcm = list_first_entry(&mux->kcm_rx_waiters, in requeue_rx_msgs()
236 if (kcm_queue_rcv_skb(&kcm->sk, skb)) { in requeue_rx_msgs()
238 list_del(&kcm->wait_rx_list); in requeue_rx_msgs()
239 kcm->rx_wait = false; in requeue_rx_msgs()
253 struct kcm_mux *mux = psock->mux; in reserve_rx_kcm() local
256 WARN_ON(psock->ready_rx_msg); in reserve_rx_kcm()
258 if (psock->rx_kcm) in reserve_rx_kcm()
259 return psock->rx_kcm; in reserve_rx_kcm()
261 spin_lock_bh(&mux->rx_lock); in reserve_rx_kcm()
263 if (psock->rx_kcm) { in reserve_rx_kcm()
264 spin_unlock_bh(&mux->rx_lock); in reserve_rx_kcm()
265 return psock->rx_kcm; in reserve_rx_kcm()
268 kcm_update_rx_mux_stats(mux, psock); in reserve_rx_kcm()
270 if (list_empty(&mux->kcm_rx_waiters)) { in reserve_rx_kcm()
271 psock->ready_rx_msg = head; in reserve_rx_kcm()
272 strp_pause(&psock->strp); in reserve_rx_kcm()
273 list_add_tail(&psock->psock_ready_list, in reserve_rx_kcm()
274 &mux->psocks_ready); in reserve_rx_kcm()
275 spin_unlock_bh(&mux->rx_lock); in reserve_rx_kcm()
279 kcm = list_first_entry(&mux->kcm_rx_waiters, in reserve_rx_kcm()
281 list_del(&kcm->wait_rx_list); in reserve_rx_kcm()
282 kcm->rx_wait = false; in reserve_rx_kcm()
284 psock->rx_kcm = kcm; in reserve_rx_kcm()
285 kcm->rx_psock = psock; in reserve_rx_kcm()
287 spin_unlock_bh(&mux->rx_lock); in reserve_rx_kcm()
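
Note: reserve_rx_kcm() uses a lockless fast path (psock->rx_kcm is read without the lock) followed by a re-check under mux->rx_lock, which is only safe because the field is cleared in controlled contexts. A hedged pthread sketch of that check/lock/re-check shape, with hypothetical names:

    #include <pthread.h>
    #include <stddef.h>

    struct consumer;                 /* opaque here */

    struct producer {
        struct consumer *reserved;   /* written only under 'lock' */
        pthread_mutex_t lock;
    };

    static struct consumer *reserve(struct producer *p,
                                    struct consumer *candidate)
    {
        /* Fast path: an unlocked read, valid only under the same
         * single-clearer rule the kernel code relies on. */
        if (p->reserved)
            return p->reserved;

        pthread_mutex_lock(&p->lock);
        if (p->reserved) {           /* re-check: we lost a race */
            struct consumer *c = p->reserved;

            pthread_mutex_unlock(&p->lock);
            return c;
        }
        p->reserved = candidate;     /* take the reservation */
        pthread_mutex_unlock(&p->lock);
        return candidate;
    }
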
303 struct kcm_sock *kcm = psock->rx_kcm; in unreserve_rx_kcm()
304 struct kcm_mux *mux = psock->mux; in unreserve_rx_kcm() local
309 spin_lock_bh(&mux->rx_lock); in unreserve_rx_kcm()
311 psock->rx_kcm = NULL; in unreserve_rx_kcm()
312 kcm->rx_psock = NULL; in unreserve_rx_kcm()
314 /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with in unreserve_rx_kcm()
319 if (unlikely(kcm->done)) { in unreserve_rx_kcm()
320 spin_unlock_bh(&mux->rx_lock); in unreserve_rx_kcm()
325 INIT_WORK(&kcm->done_work, kcm_done_work); in unreserve_rx_kcm()
326 schedule_work(&kcm->done_work); in unreserve_rx_kcm()
330 if (unlikely(kcm->rx_disabled)) { in unreserve_rx_kcm()
331 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue); in unreserve_rx_kcm()
332 } else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) { in unreserve_rx_kcm()
338 spin_unlock_bh(&mux->rx_lock); in unreserve_rx_kcm()
346 read_lock_bh(&sk->sk_callback_lock); in psock_data_ready()
348 psock = (struct kcm_psock *)sk->sk_user_data; in psock_data_ready()
350 strp_data_ready(&psock->strp); in psock_data_ready()
352 read_unlock_bh(&sk->sk_callback_lock); in psock_data_ready()
370 if (kcm_queue_rcv_skb(&kcm->sk, skb)) { in kcm_rcv_strparser()
380 struct bpf_prog *prog = psock->bpf_prog; in kcm_parse_func_strparser()
409 struct kcm_mux *mux; in psock_write_space() local
412 read_lock_bh(&sk->sk_callback_lock); in psock_write_space()
414 psock = (struct kcm_psock *)sk->sk_user_data; in psock_write_space()
417 mux = psock->mux; in psock_write_space()
419 spin_lock_bh(&mux->lock); in psock_write_space()
422 kcm = psock->tx_kcm; in psock_write_space()
423 if (kcm && !unlikely(kcm->tx_stopped)) in psock_write_space()
424 queue_work(kcm_wq, &kcm->tx_work); in psock_write_space()
426 spin_unlock_bh(&mux->lock); in psock_write_space()
428 read_unlock_bh(&sk->sk_callback_lock); in psock_write_space()
433 /* kcm sock is locked. */
436 struct kcm_mux *mux = kcm->mux; in reserve_psock() local
439 psock = kcm->tx_psock; in reserve_psock()
444 WARN_ON(kcm->tx_wait); in reserve_psock()
445 if (unlikely(psock->tx_stopped)) in reserve_psock()
448 return kcm->tx_psock; in reserve_psock()
451 spin_lock_bh(&mux->lock); in reserve_psock()
456 psock = kcm->tx_psock; in reserve_psock()
458 WARN_ON(kcm->tx_wait); in reserve_psock()
459 spin_unlock_bh(&mux->lock); in reserve_psock()
460 return kcm->tx_psock; in reserve_psock()
463 if (!list_empty(&mux->psocks_avail)) { in reserve_psock()
464 psock = list_first_entry(&mux->psocks_avail, in reserve_psock()
467 list_del(&psock->psock_avail_list); in reserve_psock()
468 if (kcm->tx_wait) { in reserve_psock()
469 list_del(&kcm->wait_psock_list); in reserve_psock()
470 kcm->tx_wait = false; in reserve_psock()
472 kcm->tx_psock = psock; in reserve_psock()
473 psock->tx_kcm = kcm; in reserve_psock()
474 KCM_STATS_INCR(psock->stats.reserved); in reserve_psock()
475 } else if (!kcm->tx_wait) { in reserve_psock()
476 list_add_tail(&kcm->wait_psock_list, in reserve_psock()
477 &mux->kcm_tx_waiters); in reserve_psock()
478 kcm->tx_wait = true; in reserve_psock()
481 spin_unlock_bh(&mux->lock); in reserve_psock()
486 /* mux lock held */
489 struct kcm_mux *mux = psock->mux; in psock_now_avail() local
492 if (list_empty(&mux->kcm_tx_waiters)) { in psock_now_avail()
493 list_add_tail(&psock->psock_avail_list, in psock_now_avail()
494 &mux->psocks_avail); in psock_now_avail()
496 kcm = list_first_entry(&mux->kcm_tx_waiters, in psock_now_avail()
499 list_del(&kcm->wait_psock_list); in psock_now_avail()
500 kcm->tx_wait = false; in psock_now_avail()
501 psock->tx_kcm = kcm; in psock_now_avail()
508 kcm->tx_psock = psock; in psock_now_avail()
509 KCM_STATS_INCR(psock->stats.reserved); in psock_now_avail()
510 queue_work(kcm_wq, &kcm->tx_work); in psock_now_avail()
514 /* kcm sock is locked. */
518 struct kcm_mux *mux = kcm->mux; in unreserve_psock() local
520 spin_lock_bh(&mux->lock); in unreserve_psock()
522 psock = kcm->tx_psock; in unreserve_psock()
525 spin_unlock_bh(&mux->lock); in unreserve_psock()
531 kcm_update_tx_mux_stats(mux, psock); in unreserve_psock()
533 WARN_ON(kcm->tx_wait); in unreserve_psock()
535 kcm->tx_psock = NULL; in unreserve_psock()
536 psock->tx_kcm = NULL; in unreserve_psock()
537 KCM_STATS_INCR(psock->stats.unreserved); in unreserve_psock()
539 if (unlikely(psock->tx_stopped)) { in unreserve_psock()
540 if (psock->done) { in unreserve_psock()
542 list_del(&psock->psock_list); in unreserve_psock()
543 mux->psocks_cnt--; in unreserve_psock()
544 sock_put(psock->sk); in unreserve_psock()
545 fput(psock->sk->sk_socket->file); in unreserve_psock()
551 spin_unlock_bh(&mux->lock); in unreserve_psock()
558 spin_unlock_bh(&mux->lock); in unreserve_psock()
563 struct kcm_mux *mux = kcm->mux; in kcm_report_tx_retry() local
565 spin_lock_bh(&mux->lock); in kcm_report_tx_retry()
566 KCM_STATS_INCR(mux->stats.tx_retries); in kcm_report_tx_retry()
567 spin_unlock_bh(&mux->lock); in kcm_report_tx_retry()
575 struct sock *sk = &kcm->sk; in kcm_write_msgs()
583 kcm->tx_wait_more = false; in kcm_write_msgs()
584 psock = kcm->tx_psock; in kcm_write_msgs()
585 if (unlikely(psock && psock->tx_stopped)) { in kcm_write_msgs()
591 if (skb_queue_empty(&sk->sk_write_queue)) in kcm_write_msgs()
594 kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0; in kcm_write_msgs()
596 } else if (skb_queue_empty(&sk->sk_write_queue)) { in kcm_write_msgs()
600 head = skb_peek(&sk->sk_write_queue); in kcm_write_msgs()
603 if (txm->sent) { in kcm_write_msgs()
606 ret = -EINVAL; in kcm_write_msgs()
609 sent = txm->sent; in kcm_write_msgs()
610 frag_offset = txm->frag_offset; in kcm_write_msgs()
611 fragidx = txm->fragidx; in kcm_write_msgs()
612 skb = txm->frag_skb; in kcm_write_msgs()
628 if (WARN_ON(!skb_shinfo(skb)->nr_frags)) { in kcm_write_msgs()
629 ret = -EINVAL; in kcm_write_msgs()
633 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; in kcm_write_msgs()
639 frag = &skb_shinfo(skb)->frags[fragidx]; in kcm_write_msgs()
641 ret = -EINVAL; in kcm_write_msgs()
645 ret = kernel_sendpage(psock->sk->sk_socket, in kcm_write_msgs()
648 skb_frag_size(frag) - frag_offset, in kcm_write_msgs()
651 if (ret == -EAGAIN) { in kcm_write_msgs()
655 txm->sent = sent; in kcm_write_msgs()
656 txm->frag_offset = frag_offset; in kcm_write_msgs()
657 txm->fragidx = fragidx; in kcm_write_msgs()
658 txm->frag_skb = skb; in kcm_write_msgs()
669 kcm_abort_tx_psock(psock, ret ? -ret : EPIPE, in kcm_write_msgs()
673 txm->sent = 0; in kcm_write_msgs()
682 KCM_STATS_ADD(psock->stats.tx_bytes, ret); in kcm_write_msgs()
691 skb = skb_shinfo(skb)->frag_list; in kcm_write_msgs()
694 } else if (skb->next) { in kcm_write_msgs()
695 skb = skb->next; in kcm_write_msgs()
700 skb_dequeue(&sk->sk_write_queue); in kcm_write_msgs()
702 sk->sk_wmem_queued -= sent; in kcm_write_msgs()
704 KCM_STATS_INCR(psock->stats.tx_msgs); in kcm_write_msgs()
705 } while ((head = skb_peek(&sk->sk_write_queue))); in kcm_write_msgs()
709 WARN_ON(!skb_queue_empty(&sk->sk_write_queue)); in kcm_write_msgs()
714 sk->sk_write_space(sk); in kcm_write_msgs()
722 struct sock *sk = &kcm->sk; in kcm_tx_work()
734 report_csk_error(&kcm->sk, -err); in kcm_tx_work()
739 if (likely(sk->sk_socket) && in kcm_tx_work()
740 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in kcm_tx_work()
741 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in kcm_tx_work()
742 sk->sk_write_space(sk); in kcm_tx_work()
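
Note: kcm_tx_work() finishes by clearing SOCK_NOSPACE and calling sk_write_space() once the write queue has drained; senders set the bit (see the set_bit() calls in kcm_sendpage()/kcm_sendmsg() below) before giving up, so wakeups are generated only when somebody actually asked for one. A small pthread sketch of that handshake, names hypothetical:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct txq {
        pthread_mutex_t lock;
        pthread_cond_t  write_space;
        bool nospace;                /* "wake me when space frees up" */
        size_t queued, limit;
    };

    static bool try_queue(struct txq *q, size_t len)
    {
        bool ok;

        pthread_mutex_lock(&q->lock);
        ok = q->queued + len <= q->limit;
        if (ok)
            q->queued += len;
        else
            q->nospace = true;       /* request a wakeup */
        pthread_mutex_unlock(&q->lock);
        return ok;
    }

    static void wait_for_space(struct txq *q)
    {
        pthread_mutex_lock(&q->lock);
        while (q->nospace)
            pthread_cond_wait(&q->write_space, &q->lock);
        pthread_mutex_unlock(&q->lock);
    }

    /* Called by the flusher after transmitting 'len' bytes. */
    static void flushed(struct txq *q, size_t len)
    {
        pthread_mutex_lock(&q->lock);
        q->queued -= len;
        if (q->nospace) {            /* wake only if someone asked */
            q->nospace = false;
            pthread_cond_broadcast(&q->write_space);
        }
        pthread_mutex_unlock(&q->lock);
    }
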
751 if (kcm->tx_wait_more) in kcm_push()
759 struct sock *sk = sock->sk; in kcm_sendpage()
777 err = -EPIPE; in kcm_sendpage()
778 if (sk->sk_err) in kcm_sendpage()
781 if (kcm->seq_skb) { in kcm_sendpage()
783 head = kcm->seq_skb; in kcm_sendpage()
784 skb = kcm_tx_msg(head)->last_skb; in kcm_sendpage()
785 i = skb_shinfo(skb)->nr_frags; in kcm_sendpage()
788 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); in kcm_sendpage()
789 skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; in kcm_sendpage()
796 tskb = alloc_skb(0, sk->sk_allocation); in kcm_sendpage()
805 skb_shinfo(head)->frag_list = tskb; in kcm_sendpage()
807 skb->next = tskb; in kcm_sendpage()
810 skb->ip_summed = CHECKSUM_UNNECESSARY; in kcm_sendpage()
817 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in kcm_sendpage()
823 head = alloc_skb(0, sk->sk_allocation); in kcm_sendpage()
837 skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; in kcm_sendpage()
840 skb->len += size; in kcm_sendpage()
841 skb->data_len += size; in kcm_sendpage()
842 skb->truesize += size; in kcm_sendpage()
843 sk->sk_wmem_queued += size; in kcm_sendpage()
847 head->len += size; in kcm_sendpage()
848 head->data_len += size; in kcm_sendpage()
849 head->truesize += size; in kcm_sendpage()
853 bool not_busy = skb_queue_empty(&sk->sk_write_queue); in kcm_sendpage()
856 __skb_queue_tail(&sk->sk_write_queue, head); in kcm_sendpage()
857 kcm->seq_skb = NULL; in kcm_sendpage()
858 KCM_STATS_INCR(kcm->stats.tx_msgs); in kcm_sendpage()
861 kcm->tx_wait_more = true; in kcm_sendpage()
862 } else if (kcm->tx_wait_more || not_busy) { in kcm_sendpage()
871 report_csk_error(&kcm->sk, -err); in kcm_sendpage()
876 kcm->seq_skb = head; in kcm_sendpage()
877 kcm_tx_msg(head)->last_skb = skb; in kcm_sendpage()
880 KCM_STATS_ADD(kcm->stats.tx_bytes, size); in kcm_sendpage()
891 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN)) in kcm_sendpage()
892 sk->sk_write_space(sk); in kcm_sendpage()
900 struct sock *sk = sock->sk; in kcm_sendmsg()
904 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); in kcm_sendmsg()
905 int eor = (sock->type == SOCK_DGRAM) ? in kcm_sendmsg()
906 !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR); in kcm_sendmsg()
907 int err = -EPIPE; in kcm_sendmsg()
914 if (sk->sk_err) in kcm_sendmsg()
917 if (kcm->seq_skb) { in kcm_sendmsg()
919 head = kcm->seq_skb; in kcm_sendmsg()
920 skb = kcm_tx_msg(head)->last_skb; in kcm_sendmsg()
927 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in kcm_sendmsg()
935 head = alloc_skb(0, sk->sk_allocation); in kcm_sendmsg()
942 head = alloc_skb(0, sk->sk_allocation); in kcm_sendmsg()
950 skb->ip_summed = CHECKSUM_UNNECESSARY; in kcm_sendmsg()
956 int i = skb_shinfo(skb)->nr_frags; in kcm_sendmsg()
962 if (!skb_can_coalesce(skb, i, pfrag->page, in kcm_sendmsg()
963 pfrag->offset)) { in kcm_sendmsg()
967 tskb = alloc_skb(0, sk->sk_allocation); in kcm_sendmsg()
972 skb_shinfo(head)->frag_list = tskb; in kcm_sendmsg()
974 skb->next = tskb; in kcm_sendmsg()
977 skb->ip_summed = CHECKSUM_UNNECESSARY; in kcm_sendmsg()
984 pfrag->size - pfrag->offset); in kcm_sendmsg()
989 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, in kcm_sendmsg()
990 pfrag->page, in kcm_sendmsg()
991 pfrag->offset, in kcm_sendmsg()
998 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); in kcm_sendmsg()
1000 skb_fill_page_desc(skb, i, pfrag->page, in kcm_sendmsg()
1001 pfrag->offset, copy); in kcm_sendmsg()
1002 get_page(pfrag->page); in kcm_sendmsg()
1005 pfrag->offset += copy; in kcm_sendmsg()
1008 head->len += copy; in kcm_sendmsg()
1009 head->data_len += copy; in kcm_sendmsg()
1022 bool not_busy = skb_queue_empty(&sk->sk_write_queue); in kcm_sendmsg()
1026 __skb_queue_tail(&sk->sk_write_queue, head); in kcm_sendmsg()
1027 kcm->seq_skb = NULL; in kcm_sendmsg()
1028 KCM_STATS_INCR(kcm->stats.tx_msgs); in kcm_sendmsg()
1031 if (msg->msg_flags & MSG_BATCH) { in kcm_sendmsg()
1032 kcm->tx_wait_more = true; in kcm_sendmsg()
1033 } else if (kcm->tx_wait_more || not_busy) { in kcm_sendmsg()
1042 report_csk_error(&kcm->sk, -err); in kcm_sendmsg()
1049 kcm->seq_skb = head; in kcm_sendmsg()
1050 kcm_tx_msg(head)->last_skb = skb; in kcm_sendmsg()
1054 KCM_STATS_ADD(kcm->stats.tx_bytes, copied); in kcm_sendmsg()
1062 if (copied && sock->type == SOCK_SEQPACKET) { in kcm_sendmsg()
1069 if (head != kcm->seq_skb) in kcm_sendmsg()
1072 err = sk_stream_error(sk, msg->msg_flags, err); in kcm_sendmsg()
1075 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN)) in kcm_sendmsg()
1076 sk->sk_write_space(sk); in kcm_sendmsg()
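
Note: in kcm_sendmsg() above, message boundaries depend on the socket type: on SOCK_DGRAM every sendmsg() without MSG_MORE ends a message, on SOCK_SEQPACKET MSG_EOR does, and MSG_BATCH queues the finished message while deferring the transmit kick (the tx_wait_more handling). A hedged userspace sketch, socket setup not shown:

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    #ifndef MSG_BATCH
    #define MSG_BATCH 0x40000   /* from linux/socket.h, if libc lacks it */
    #endif

    static ssize_t kcm_send(int kcmfd, const void *buf, size_t len,
                            int flags)
    {
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

        return sendmsg(kcmfd, &msg, flags);
    }

    /* e.g. kcm_send(fd, a, alen, MSG_BATCH);   queued, kick deferred
     *      kcm_send(fd, b, blen, 0);           transmits both messages */
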
1087 while (!(skb = skb_peek(&sk->sk_receive_queue))) { in kcm_wait_data()
1088 if (sk->sk_err) { in kcm_wait_data()
1097 *err = -EAGAIN; in kcm_wait_data()
1116 struct sock *sk = sock->sk; in kcm_recvmsg()
1136 if (len > stm->full_len) in kcm_recvmsg()
1137 len = stm->full_len; in kcm_recvmsg()
1139 err = skb_copy_datagram_msg(skb, stm->offset, msg, len); in kcm_recvmsg()
1145 KCM_STATS_ADD(kcm->stats.rx_bytes, copied); in kcm_recvmsg()
1146 if (copied < stm->full_len) { in kcm_recvmsg()
1147 if (sock->type == SOCK_DGRAM) { in kcm_recvmsg()
1149 msg->msg_flags |= MSG_TRUNC; in kcm_recvmsg()
1152 stm->offset += copied; in kcm_recvmsg()
1153 stm->full_len -= copied; in kcm_recvmsg()
1157 msg->msg_flags |= MSG_EOR; in kcm_recvmsg()
1158 KCM_STATS_INCR(kcm->stats.rx_msgs); in kcm_recvmsg()
1159 skb_unlink(skb, &sk->sk_receive_queue); in kcm_recvmsg()
1174 struct sock *sk = sock->sk; in kcm_splice_read()
1196 if (len > stm->full_len) in kcm_splice_read()
1197 len = stm->full_len; in kcm_splice_read()
1199 copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags); in kcm_splice_read()
1205 KCM_STATS_ADD(kcm->stats.rx_bytes, copied); in kcm_splice_read()
1207 stm->offset += copied; in kcm_splice_read()
1208 stm->full_len -= copied; in kcm_splice_read()
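
Note: both kcm_recvmsg() and kcm_splice_read() consume a message in place. A partial read advances stm->offset and shrinks stm->full_len so the skb stays queued and the next call resumes mid-message (on SOCK_DGRAM the remainder is instead dropped with MSG_TRUNC). The bookkeeping reduces to:

    #include <stddef.h>

    /* Sketch of the partial-consume bookkeeping; rx_msg is hypothetical. */
    struct rx_msg { size_t offset, full_len; };

    static size_t consume(struct rx_msg *m, size_t want)
    {
        size_t take = want < m->full_len ? want : m->full_len;

        m->offset   += take;   /* next read resumes here */
        m->full_len -= take;   /* 0 means the message is finished */
        return take;
    }
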
1229 struct kcm_mux *mux = kcm->mux; in kcm_recv_disable() local
1231 if (kcm->rx_disabled) in kcm_recv_disable()
1234 spin_lock_bh(&mux->rx_lock); in kcm_recv_disable()
1236 kcm->rx_disabled = 1; in kcm_recv_disable()
1239 if (!kcm->rx_psock) { in kcm_recv_disable()
1240 if (kcm->rx_wait) { in kcm_recv_disable()
1241 list_del(&kcm->wait_rx_list); in kcm_recv_disable()
1242 kcm->rx_wait = false; in kcm_recv_disable()
1245 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue); in kcm_recv_disable()
1248 spin_unlock_bh(&mux->rx_lock); in kcm_recv_disable()
1254 struct kcm_mux *mux = kcm->mux; in kcm_recv_enable() local
1256 if (!kcm->rx_disabled) in kcm_recv_enable()
1259 spin_lock_bh(&mux->rx_lock); in kcm_recv_enable()
1261 kcm->rx_disabled = 0; in kcm_recv_enable()
1264 spin_unlock_bh(&mux->rx_lock); in kcm_recv_enable()
1270 struct kcm_sock *kcm = kcm_sk(sock->sk); in kcm_setsockopt()
1275 return -ENOPROTOOPT; in kcm_setsockopt()
1278 return -EINVAL; in kcm_setsockopt()
1281 return -EFAULT; in kcm_setsockopt()
1287 lock_sock(&kcm->sk); in kcm_setsockopt()
1292 release_sock(&kcm->sk); in kcm_setsockopt()
1295 err = -ENOPROTOOPT; in kcm_setsockopt()
1304 struct kcm_sock *kcm = kcm_sk(sock->sk); in kcm_getsockopt()
1308 return -ENOPROTOOPT; in kcm_getsockopt()
1311 return -EFAULT; in kcm_getsockopt()
1315 return -EINVAL; in kcm_getsockopt()
1319 val = kcm->rx_disabled; in kcm_getsockopt()
1322 return -ENOPROTOOPT; in kcm_getsockopt()
1326 return -EFAULT; in kcm_getsockopt()
1328 return -EFAULT; in kcm_getsockopt()
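
Note: the sockopt pair above exposes exactly one option, KCM_RECV_DISABLE at level SOL_KCM, backed by kcm_recv_disable()/kcm_recv_enable(). A hedged userspace sketch, with the constants taken from the uapi headers:

    #include <sys/socket.h>
    #include <linux/kcm.h>       /* KCM_RECV_DISABLE */

    #ifndef SOL_KCM
    #define SOL_KCM 281          /* from linux/socket.h, if libc lacks it */
    #endif

    static int kcm_set_recv_disabled(int kcmfd, int disabled)
    {
        return setsockopt(kcmfd, SOL_KCM, KCM_RECV_DISABLE,
                          &disabled, sizeof(disabled));
    }
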
1332 static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux) in init_kcm_sock() argument
1342 kcm->sk.sk_state = TCP_ESTABLISHED; in init_kcm_sock()
1344 /* Add to mux's kcm sockets list */ in init_kcm_sock()
1345 kcm->mux = mux; in init_kcm_sock()
1346 spin_lock_bh(&mux->lock); in init_kcm_sock()
1348 head = &mux->kcm_socks; in init_kcm_sock()
1349 list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) { in init_kcm_sock()
1350 if (tkcm->index != index) in init_kcm_sock()
1352 head = &tkcm->kcm_sock_list; in init_kcm_sock()
1356 list_add(&kcm->kcm_sock_list, head); in init_kcm_sock()
1357 kcm->index = index; in init_kcm_sock()
1359 mux->kcm_socks_cnt++; in init_kcm_sock()
1360 spin_unlock_bh(&mux->lock); in init_kcm_sock()
1362 INIT_WORK(&kcm->tx_work, kcm_tx_work); in init_kcm_sock()
1364 spin_lock_bh(&mux->rx_lock); in init_kcm_sock()
1366 spin_unlock_bh(&mux->rx_lock); in init_kcm_sock()
1372 struct kcm_sock *kcm = kcm_sk(sock->sk); in kcm_attach()
1373 struct kcm_mux *mux = kcm->mux; in kcm_attach() local
1385 csk = csock->sk; in kcm_attach()
1387 return -EINVAL; in kcm_attach()
1392 if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) || in kcm_attach()
1393 csk->sk_protocol != IPPROTO_TCP) { in kcm_attach()
1394 err = -EOPNOTSUPP; in kcm_attach()
1399 if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) { in kcm_attach()
1400 err = -EOPNOTSUPP; in kcm_attach()
1406 err = -ENOMEM; in kcm_attach()
1410 psock->mux = mux; in kcm_attach()
1411 psock->sk = csk; in kcm_attach()
1412 psock->bpf_prog = prog; in kcm_attach()
1414 err = strp_init(&psock->strp, csk, &cb); in kcm_attach()
1420 write_lock_bh(&csk->sk_callback_lock); in kcm_attach()
1425 if (csk->sk_user_data) { in kcm_attach()
1426 write_unlock_bh(&csk->sk_callback_lock); in kcm_attach()
1427 strp_stop(&psock->strp); in kcm_attach()
1428 strp_done(&psock->strp); in kcm_attach()
1430 err = -EALREADY; in kcm_attach()
1434 psock->save_data_ready = csk->sk_data_ready; in kcm_attach()
1435 psock->save_write_space = csk->sk_write_space; in kcm_attach()
1436 psock->save_state_change = csk->sk_state_change; in kcm_attach()
1437 csk->sk_user_data = psock; in kcm_attach()
1438 csk->sk_data_ready = psock_data_ready; in kcm_attach()
1439 csk->sk_write_space = psock_write_space; in kcm_attach()
1440 csk->sk_state_change = psock_state_change; in kcm_attach()
1442 write_unlock_bh(&csk->sk_callback_lock); in kcm_attach()
1446 /* Finished initialization, now add the psock to the MUX. */ in kcm_attach()
1447 spin_lock_bh(&mux->lock); in kcm_attach()
1448 head = &mux->psocks; in kcm_attach()
1449 list_for_each_entry(tpsock, &mux->psocks, psock_list) { in kcm_attach()
1450 if (tpsock->index != index) in kcm_attach()
1452 head = &tpsock->psock_list; in kcm_attach()
1456 list_add(&psock->psock_list, head); in kcm_attach()
1457 psock->index = index; in kcm_attach()
1459 KCM_STATS_INCR(mux->stats.psock_attach); in kcm_attach()
1460 mux->psocks_cnt++; in kcm_attach()
1462 spin_unlock_bh(&mux->lock); in kcm_attach()
1465 strp_check_rcv(&psock->strp); in kcm_attach()
1479 csock = sockfd_lookup(info->fd, &err); in kcm_attach_ioctl()
1481 return -ENOENT; in kcm_attach_ioctl()
1483 prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER); in kcm_attach_ioctl()
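
Note: kcm_attach()/kcm_attach_ioctl() above are driven from userspace by SIOCKCMATTACH: the caller hands the mux a connected TCP socket plus a BPF_PROG_TYPE_SOCKET_FILTER program whose job is to return the length of the next message. A hedged sketch, assuming both fds were set up elsewhere:

    #include <sys/ioctl.h>
    #include <linux/kcm.h>       /* struct kcm_attach, SIOCKCMATTACH */

    static int kcm_attach_tcp(int kcmfd, int tcpfd, int bpf_prog_fd)
    {
        struct kcm_attach attach = {
            .fd     = tcpfd,       /* connected TCP socket */
            .bpf_fd = bpf_prog_fd, /* parser returning the msg length */
        };

        return ioctl(kcmfd, SIOCKCMATTACH, &attach);
    }

Detach is symmetric via SIOCKCMUNATTACH and struct kcm_unattach, matching kcm_unattach_ioctl() below.
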
1505 struct sock *csk = psock->sk; in kcm_unattach()
1506 struct kcm_mux *mux = psock->mux; in kcm_unattach() local
1513 write_lock_bh(&csk->sk_callback_lock); in kcm_unattach()
1514 csk->sk_user_data = NULL; in kcm_unattach()
1515 csk->sk_data_ready = psock->save_data_ready; in kcm_unattach()
1516 csk->sk_write_space = psock->save_write_space; in kcm_unattach()
1517 csk->sk_state_change = psock->save_state_change; in kcm_unattach()
1518 strp_stop(&psock->strp); in kcm_unattach()
1520 if (WARN_ON(psock->rx_kcm)) { in kcm_unattach()
1521 write_unlock_bh(&csk->sk_callback_lock); in kcm_unattach()
1526 spin_lock_bh(&mux->rx_lock); in kcm_unattach()
1531 if (psock->ready_rx_msg) { in kcm_unattach()
1532 list_del(&psock->psock_ready_list); in kcm_unattach()
1533 kfree_skb(psock->ready_rx_msg); in kcm_unattach()
1534 psock->ready_rx_msg = NULL; in kcm_unattach()
1535 KCM_STATS_INCR(mux->stats.rx_ready_drops); in kcm_unattach()
1538 spin_unlock_bh(&mux->rx_lock); in kcm_unattach()
1540 write_unlock_bh(&csk->sk_callback_lock); in kcm_unattach()
1544 strp_done(&psock->strp); in kcm_unattach()
1547 bpf_prog_put(psock->bpf_prog); in kcm_unattach()
1549 spin_lock_bh(&mux->lock); in kcm_unattach()
1551 aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats); in kcm_unattach()
1552 save_strp_stats(&psock->strp, &mux->aggregate_strp_stats); in kcm_unattach()
1554 KCM_STATS_INCR(mux->stats.psock_unattach); in kcm_unattach()
1556 if (psock->tx_kcm) { in kcm_unattach()
1561 KCM_STATS_INCR(mux->stats.psock_unattach_rsvd); in kcm_unattach()
1562 spin_unlock_bh(&mux->lock); in kcm_unattach()
1566 * to do this without the mux lock. in kcm_unattach()
1570 spin_lock_bh(&mux->lock); in kcm_unattach()
1571 if (!psock->tx_kcm) { in kcm_unattach()
1572 /* psock was unreserved in the window where the mux was unlocked */ in kcm_unattach()
1575 psock->done = 1; in kcm_unattach()
1580 /* Queue tx work to make sure psock->done is handled */ in kcm_unattach()
1581 queue_work(kcm_wq, &psock->tx_kcm->tx_work); in kcm_unattach()
1582 spin_unlock_bh(&mux->lock); in kcm_unattach()
1585 if (!psock->tx_stopped) in kcm_unattach()
1586 list_del(&psock->psock_avail_list); in kcm_unattach()
1587 list_del(&psock->psock_list); in kcm_unattach()
1588 mux->psocks_cnt--; in kcm_unattach()
1589 spin_unlock_bh(&mux->lock); in kcm_unattach()
1592 fput(csk->sk_socket->file); in kcm_unattach()
1601 struct kcm_sock *kcm = kcm_sk(sock->sk); in kcm_unattach_ioctl()
1602 struct kcm_mux *mux = kcm->mux; in kcm_unattach_ioctl() local
1608 csock = sockfd_lookup(info->fd, &err); in kcm_unattach_ioctl()
1610 return -ENOENT; in kcm_unattach_ioctl()
1612 csk = csock->sk; in kcm_unattach_ioctl()
1614 err = -EINVAL; in kcm_unattach_ioctl()
1618 err = -ENOENT; in kcm_unattach_ioctl()
1620 spin_lock_bh(&mux->lock); in kcm_unattach_ioctl()
1622 list_for_each_entry(psock, &mux->psocks, psock_list) { in kcm_unattach_ioctl()
1623 if (psock->sk != csk) in kcm_unattach_ioctl()
1628 if (psock->unattaching || WARN_ON(psock->done)) { in kcm_unattach_ioctl()
1629 err = -EALREADY; in kcm_unattach_ioctl()
1633 psock->unattaching = 1; in kcm_unattach_ioctl()
1635 spin_unlock_bh(&mux->lock); in kcm_unattach_ioctl()
1644 spin_unlock_bh(&mux->lock); in kcm_unattach_ioctl()
1665 return ERR_PTR(-ENFILE); in kcm_clone()
1667 newsock->type = osock->type; in kcm_clone()
1668 newsock->ops = osock->ops; in kcm_clone()
1670 __module_get(newsock->ops->owner); in kcm_clone()
1672 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL, in kcm_clone()
1676 return ERR_PTR(-ENOMEM); in kcm_clone()
1679 init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux); in kcm_clone()
1681 return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name); in kcm_clone()
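
Note: kcm_clone() creates a second KCM socket sharing the same mux, so several threads can each pull whole messages from one pool of attached transports. From userspace this is the SIOCKCMCLONE ioctl, which returns the new descriptor in the struct's fd field. A hedged sketch:

    #include <sys/ioctl.h>
    #include <linux/kcm.h>       /* struct kcm_clone, SIOCKCMCLONE */

    static int kcm_clone_fd(int kcmfd)
    {
        struct kcm_clone info = { 0 };

        if (ioctl(kcmfd, SIOCKCMCLONE, &info) < 0)
            return -1;
        return info.fd;          /* new KCM socket on the same mux */
    }
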
1693 return -EFAULT; in kcm_ioctl()
1703 return -EFAULT; in kcm_ioctl()
1726 return -EFAULT; in kcm_ioctl()
1733 err = -ENOIOCTLCMD; in kcm_ioctl()
1742 struct kcm_mux *mux = container_of(rcu, in free_mux() local
1745 kmem_cache_free(kcm_muxp, mux); in free_mux()
1748 static void release_mux(struct kcm_mux *mux) in release_mux() argument
1750 struct kcm_net *knet = mux->knet; in release_mux()
1755 &mux->psocks, psock_list) { in release_mux()
1756 if (!WARN_ON(psock->unattaching)) in release_mux()
1760 if (WARN_ON(mux->psocks_cnt)) in release_mux()
1763 __skb_queue_purge(&mux->rx_hold_queue); in release_mux()
1765 mutex_lock(&knet->mutex); in release_mux()
1766 aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats); in release_mux()
1767 aggregate_psock_stats(&mux->aggregate_psock_stats, in release_mux()
1768 &knet->aggregate_psock_stats); in release_mux()
1769 aggregate_strp_stats(&mux->aggregate_strp_stats, in release_mux()
1770 &knet->aggregate_strp_stats); in release_mux()
1771 list_del_rcu(&mux->kcm_mux_list); in release_mux()
1772 knet->count--; in release_mux()
1773 mutex_unlock(&knet->mutex); in release_mux()
1775 call_rcu(&mux->rcu, free_mux); in release_mux()
1780 struct kcm_mux *mux = kcm->mux; in kcm_done() local
1781 struct sock *sk = &kcm->sk; in kcm_done()
1784 spin_lock_bh(&mux->rx_lock); in kcm_done()
1785 if (kcm->rx_psock) { in kcm_done()
1787 WARN_ON(kcm->done); in kcm_done()
1788 kcm->rx_disabled = 1; in kcm_done()
1789 kcm->done = 1; in kcm_done()
1790 spin_unlock_bh(&mux->rx_lock); in kcm_done()
1794 if (kcm->rx_wait) { in kcm_done()
1795 list_del(&kcm->wait_rx_list); in kcm_done()
1796 kcm->rx_wait = false; in kcm_done()
1799 requeue_rx_msgs(mux, &sk->sk_receive_queue); in kcm_done()
1801 spin_unlock_bh(&mux->rx_lock); in kcm_done()
1806 /* Detach from MUX */ in kcm_done()
1807 spin_lock_bh(&mux->lock); in kcm_done()
1809 list_del(&kcm->kcm_sock_list); in kcm_done()
1810 mux->kcm_socks_cnt--; in kcm_done()
1811 socks_cnt = mux->kcm_socks_cnt; in kcm_done()
1813 spin_unlock_bh(&mux->lock); in kcm_done()
1816 /* We are done with the mux now. */ in kcm_done()
1817 release_mux(mux); in kcm_done()
1820 WARN_ON(kcm->rx_wait); in kcm_done()
1822 sock_put(&kcm->sk); in kcm_done()
1826 * If this is the last KCM socket on the MUX, destroy the MUX.
1830 struct sock *sk = sock->sk; in kcm_release()
1832 struct kcm_mux *mux; in kcm_release() local
1839 mux = kcm->mux; in kcm_release()
1842 kfree_skb(kcm->seq_skb); in kcm_release()
1849 __skb_queue_purge(&sk->sk_write_queue); in kcm_release()
1855 kcm->tx_stopped = 1; in kcm_release()
1859 spin_lock_bh(&mux->lock); in kcm_release()
1860 if (kcm->tx_wait) { in kcm_release()
1864 list_del(&kcm->wait_psock_list); in kcm_release()
1865 kcm->tx_wait = false; in kcm_release()
1867 spin_unlock_bh(&mux->lock); in kcm_release()
1872 cancel_work_sync(&kcm->tx_work); in kcm_release()
1875 psock = kcm->tx_psock; in kcm_release()
1886 WARN_ON(kcm->tx_wait); in kcm_release()
1887 WARN_ON(kcm->tx_psock); in kcm_release()
1889 sock->sk = NULL; in kcm_release()
1945 struct kcm_mux *mux; in kcm_create() local
1947 switch (sock->type) { in kcm_create()
1949 sock->ops = &kcm_dgram_ops; in kcm_create()
1952 sock->ops = &kcm_seqpacket_ops; in kcm_create()
1955 return -ESOCKTNOSUPPORT; in kcm_create()
1959 return -EPROTONOSUPPORT; in kcm_create()
1963 return -ENOMEM; in kcm_create()
1965 /* Allocate a kcm mux, shared between KCM sockets */ in kcm_create()
1966 mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL); in kcm_create()
1967 if (!mux) { in kcm_create()
1969 return -ENOMEM; in kcm_create()
1972 spin_lock_init(&mux->lock); in kcm_create()
1973 spin_lock_init(&mux->rx_lock); in kcm_create()
1974 INIT_LIST_HEAD(&mux->kcm_socks); in kcm_create()
1975 INIT_LIST_HEAD(&mux->kcm_rx_waiters); in kcm_create()
1976 INIT_LIST_HEAD(&mux->kcm_tx_waiters); in kcm_create()
1978 INIT_LIST_HEAD(&mux->psocks); in kcm_create()
1979 INIT_LIST_HEAD(&mux->psocks_ready); in kcm_create()
1980 INIT_LIST_HEAD(&mux->psocks_avail); in kcm_create()
1982 mux->knet = knet; in kcm_create()
1984 /* Add new MUX to list */ in kcm_create()
1985 mutex_lock(&knet->mutex); in kcm_create()
1986 list_add_rcu(&mux->kcm_mux_list, &knet->mux_list); in kcm_create()
1987 knet->count++; in kcm_create()
1988 mutex_unlock(&knet->mutex); in kcm_create()
1990 skb_queue_head_init(&mux->rx_hold_queue); in kcm_create()
1994 init_kcm_sock(kcm_sk(sk), mux); in kcm_create()
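
Note: kcm_create() above allocates a fresh mux for every new KCM socket (clones share it later), and only SOCK_DGRAM and SOCK_SEQPACKET are accepted. The userspace entry point is a plain socket() call; a hedged sketch:

    #include <sys/socket.h>
    #include <linux/kcm.h>       /* KCMPROTO_CONNECTED */

    #ifndef AF_KCM
    #define AF_KCM 41            /* from linux/socket.h, if libc lacks it */
    #endif

    static int kcm_open(void)
    {
        return socket(AF_KCM, SOCK_DGRAM, KCMPROTO_CONNECTED);
    }
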
2009 INIT_LIST_HEAD_RCU(&knet->mux_list); in kcm_init_net()
2010 mutex_init(&knet->mutex); in kcm_init_net()
2022 WARN_ON(!list_empty(&knet->mux_list)); in kcm_exit_net()
2034 int err = -ENOMEM; in kcm_init()