Lines matching "mux" and "locked" in net/kcm/kcmsock.c (Linux kernel, Kernel Connection Multiplexor sockets)
1 // SPDX-License-Identifier: GPL-2.0-only
45 return (struct kcm_tx_msg *)skb->cb; in kcm_tx_msg()
50 csk->sk_err = EPIPE; in report_csk_error()
57 struct sock *csk = psock->sk; in kcm_abort_tx_psock()
58 struct kcm_mux *mux = psock->mux; in kcm_abort_tx_psock() local
62 spin_lock_bh(&mux->lock); in kcm_abort_tx_psock()
64 if (psock->tx_stopped) { in kcm_abort_tx_psock()
65 spin_unlock_bh(&mux->lock); in kcm_abort_tx_psock()
69 psock->tx_stopped = 1; in kcm_abort_tx_psock()
70 KCM_STATS_INCR(psock->stats.tx_aborts); in kcm_abort_tx_psock()
72 if (!psock->tx_kcm) { in kcm_abort_tx_psock()
74 list_del(&psock->psock_avail_list); in kcm_abort_tx_psock()
83 queue_work(kcm_wq, &psock->tx_kcm->tx_work); in kcm_abort_tx_psock()
86 spin_unlock_bh(&mux->lock); in kcm_abort_tx_psock()
92 /* RX mux lock held. */
93 static void kcm_update_rx_mux_stats(struct kcm_mux *mux, in kcm_update_rx_mux_stats() argument
96 STRP_STATS_ADD(mux->stats.rx_bytes, in kcm_update_rx_mux_stats()
97 psock->strp.stats.bytes - in kcm_update_rx_mux_stats()
98 psock->saved_rx_bytes); in kcm_update_rx_mux_stats()
99 mux->stats.rx_msgs += in kcm_update_rx_mux_stats()
100 psock->strp.stats.msgs - psock->saved_rx_msgs; in kcm_update_rx_mux_stats()
101 psock->saved_rx_msgs = psock->strp.stats.msgs; in kcm_update_rx_mux_stats()
102 psock->saved_rx_bytes = psock->strp.stats.bytes; in kcm_update_rx_mux_stats()
105 static void kcm_update_tx_mux_stats(struct kcm_mux *mux, in kcm_update_tx_mux_stats() argument
108 KCM_STATS_ADD(mux->stats.tx_bytes, in kcm_update_tx_mux_stats()
109 psock->stats.tx_bytes - psock->saved_tx_bytes); in kcm_update_tx_mux_stats()
110 mux->stats.tx_msgs += in kcm_update_tx_mux_stats()
111 psock->stats.tx_msgs - psock->saved_tx_msgs; in kcm_update_tx_mux_stats()
112 psock->saved_tx_msgs = psock->stats.tx_msgs; in kcm_update_tx_mux_stats()
113 psock->saved_tx_bytes = psock->stats.tx_bytes; in kcm_update_tx_mux_stats()
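The saved_rx_* and saved_tx_* fields above implement snapshot deltas: per-psock counters keep growing, and only the growth since the last fold is added to the mux totals. A minimal illustrative sketch of the pattern (plain C, names hypothetical, not kernel code):

	struct snap_counter {
		unsigned long total;               /* monotonically increasing live counter */
		unsigned long saved;               /* value captured at the last fold */
	};

	/* fold the counter into an aggregate without double counting */
	static void fold_counter(unsigned long *agg_total, struct snap_counter *c)
	{
		*agg_total += c->total - c->saved; /* add only the new delta */
		c->saved = c->total;               /* take a fresh snapshot */
	}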
118 /* KCM is ready to receive messages on its queue-- either the KCM is new or
120 * pending ready messages on a psock. RX mux lock held.
124 struct kcm_mux *mux = kcm->mux; in kcm_rcv_ready() local
128 if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled)) in kcm_rcv_ready()
131 while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) { in kcm_rcv_ready()
132 if (kcm_queue_rcv_skb(&kcm->sk, skb)) { in kcm_rcv_ready()
134 skb_queue_head(&mux->rx_hold_queue, skb); in kcm_rcv_ready()
135 WARN_ON(!sk_rmem_alloc_get(&kcm->sk)); in kcm_rcv_ready()
140 while (!list_empty(&mux->psocks_ready)) { in kcm_rcv_ready()
141 psock = list_first_entry(&mux->psocks_ready, struct kcm_psock, in kcm_rcv_ready()
144 if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) { in kcm_rcv_ready()
146 WARN_ON(!sk_rmem_alloc_get(&kcm->sk)); in kcm_rcv_ready()
153 list_del(&psock->psock_ready_list); in kcm_rcv_ready()
154 psock->ready_rx_msg = NULL; in kcm_rcv_ready()
158 strp_unpause(&psock->strp); in kcm_rcv_ready()
159 strp_check_rcv(&psock->strp); in kcm_rcv_ready()
163 list_add_tail(&kcm->wait_rx_list, in kcm_rcv_ready()
164 &kcm->mux->kcm_rx_waiters); in kcm_rcv_ready()
166 WRITE_ONCE(kcm->rx_wait, true); in kcm_rcv_ready()
171 struct sock *sk = skb->sk; in kcm_rfree()
173 struct kcm_mux *mux = kcm->mux; in kcm_rfree() local
174 unsigned int len = skb->truesize; in kcm_rfree()
177 atomic_sub(len, &sk->sk_rmem_alloc); in kcm_rfree()
182 if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) && in kcm_rfree()
183 sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) { in kcm_rfree()
184 spin_lock_bh(&mux->rx_lock); in kcm_rfree()
186 spin_unlock_bh(&mux->rx_lock); in kcm_rfree()
192 struct sk_buff_head *list = &sk->sk_receive_queue; in kcm_queue_rcv_skb()
194 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) in kcm_queue_rcv_skb()
195 return -ENOMEM; in kcm_queue_rcv_skb()
197 if (!sk_rmem_schedule(sk, skb, skb->truesize)) in kcm_queue_rcv_skb()
198 return -ENOBUFS; in kcm_queue_rcv_skb()
200 skb->dev = NULL; in kcm_queue_rcv_skb()
203 skb->sk = sk; in kcm_queue_rcv_skb()
204 skb->destructor = kcm_rfree; in kcm_queue_rcv_skb()
205 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in kcm_queue_rcv_skb()
206 sk_mem_charge(sk, skb->truesize); in kcm_queue_rcv_skb()
211 sk->sk_data_ready(sk); in kcm_queue_rcv_skb()
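kcm_queue_rcv_skb() above charges skb->truesize against the socket's receive allocation and installs kcm_rfree() as the destructor, so the charge is reversed exactly once when the skb is freed. A rough userspace analogue of that charge/uncharge pattern (all names hypothetical, assuming single-threaded use):

	#include <stddef.h>

	struct rbuf {
		size_t truesize;                   /* full allocation cost of the buffer */
		void (*destructor)(struct rbuf *); /* run when the buffer is freed */
	};

	static size_t rmem_alloc;                  /* analogue of sk->sk_rmem_alloc */

	static void rbuf_free_cb(struct rbuf *b)   /* analogue of kcm_rfree() */
	{
		rmem_alloc -= b->truesize;         /* uncharge on free */
	}

	static int rbuf_queue(struct rbuf *b, size_t rcvbuf)
	{
		if (rmem_alloc >= rcvbuf)
			return -1;                 /* receive budget exhausted */
		b->destructor = rbuf_free_cb;
		rmem_alloc += b->truesize;         /* charge while queued */
		return 0;
	}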
218 * RX mux lock held.
220 static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head) in requeue_rx_msgs() argument
227 skb->destructor = sock_rfree; in requeue_rx_msgs()
230 if (list_empty(&mux->kcm_rx_waiters)) { in requeue_rx_msgs()
231 skb_queue_tail(&mux->rx_hold_queue, skb); in requeue_rx_msgs()
235 kcm = list_first_entry(&mux->kcm_rx_waiters, in requeue_rx_msgs()
238 if (kcm_queue_rcv_skb(&kcm->sk, skb)) { in requeue_rx_msgs()
240 list_del(&kcm->wait_rx_list); in requeue_rx_msgs()
242 WRITE_ONCE(kcm->rx_wait, false); in requeue_rx_msgs()
256 struct kcm_mux *mux = psock->mux; in reserve_rx_kcm() local
259 WARN_ON(psock->ready_rx_msg); in reserve_rx_kcm()
261 if (psock->rx_kcm) in reserve_rx_kcm()
262 return psock->rx_kcm; in reserve_rx_kcm()
264 spin_lock_bh(&mux->rx_lock); in reserve_rx_kcm()
266 if (psock->rx_kcm) { in reserve_rx_kcm()
267 spin_unlock_bh(&mux->rx_lock); in reserve_rx_kcm()
268 return psock->rx_kcm; in reserve_rx_kcm()
271 kcm_update_rx_mux_stats(mux, psock); in reserve_rx_kcm()
273 if (list_empty(&mux->kcm_rx_waiters)) { in reserve_rx_kcm()
274 psock->ready_rx_msg = head; in reserve_rx_kcm()
275 strp_pause(&psock->strp); in reserve_rx_kcm()
276 list_add_tail(&psock->psock_ready_list, in reserve_rx_kcm()
277 &mux->psocks_ready); in reserve_rx_kcm()
278 spin_unlock_bh(&mux->rx_lock); in reserve_rx_kcm()
282 kcm = list_first_entry(&mux->kcm_rx_waiters, in reserve_rx_kcm()
284 list_del(&kcm->wait_rx_list); in reserve_rx_kcm()
286 WRITE_ONCE(kcm->rx_wait, false); in reserve_rx_kcm()
288 psock->rx_kcm = kcm; in reserve_rx_kcm()
290 WRITE_ONCE(kcm->rx_psock, psock); in reserve_rx_kcm()
292 spin_unlock_bh(&mux->rx_lock); in reserve_rx_kcm()
308 struct kcm_sock *kcm = psock->rx_kcm; in unreserve_rx_kcm()
309 struct kcm_mux *mux = psock->mux; in unreserve_rx_kcm() local
314 spin_lock_bh(&mux->rx_lock); in unreserve_rx_kcm()
316 psock->rx_kcm = NULL; in unreserve_rx_kcm()
318 WRITE_ONCE(kcm->rx_psock, NULL); in unreserve_rx_kcm()
320 /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with in unreserve_rx_kcm()
325 if (unlikely(kcm->done)) { in unreserve_rx_kcm()
326 spin_unlock_bh(&mux->rx_lock); in unreserve_rx_kcm()
331 INIT_WORK(&kcm->done_work, kcm_done_work); in unreserve_rx_kcm()
332 schedule_work(&kcm->done_work); in unreserve_rx_kcm()
336 if (unlikely(kcm->rx_disabled)) { in unreserve_rx_kcm()
337 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue); in unreserve_rx_kcm()
338 } else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) { in unreserve_rx_kcm()
344 spin_unlock_bh(&mux->rx_lock); in unreserve_rx_kcm()
352 read_lock_bh(&sk->sk_callback_lock); in psock_data_ready()
354 psock = (struct kcm_psock *)sk->sk_user_data; in psock_data_ready()
356 strp_data_ready(&psock->strp); in psock_data_ready()
358 read_unlock_bh(&sk->sk_callback_lock); in psock_data_ready()
376 if (kcm_queue_rcv_skb(&kcm->sk, skb)) { in kcm_rcv_strparser()
386 struct bpf_prog *prog = psock->bpf_prog; in kcm_parse_func_strparser()
415 struct kcm_mux *mux; in psock_write_space() local
418 read_lock_bh(&sk->sk_callback_lock); in psock_write_space()
420 psock = (struct kcm_psock *)sk->sk_user_data; in psock_write_space()
423 mux = psock->mux; in psock_write_space()
425 spin_lock_bh(&mux->lock); in psock_write_space()
428 kcm = psock->tx_kcm; in psock_write_space()
429 if (kcm && !unlikely(kcm->tx_stopped)) in psock_write_space()
430 queue_work(kcm_wq, &kcm->tx_work); in psock_write_space()
432 spin_unlock_bh(&mux->lock); in psock_write_space()
434 read_unlock_bh(&sk->sk_callback_lock); in psock_write_space()
439 /* kcm sock is locked. */
442 struct kcm_mux *mux = kcm->mux; in reserve_psock() local
445 psock = kcm->tx_psock; in reserve_psock()
450 WARN_ON(kcm->tx_wait); in reserve_psock()
451 if (unlikely(psock->tx_stopped)) in reserve_psock()
454 return kcm->tx_psock; in reserve_psock()
457 spin_lock_bh(&mux->lock); in reserve_psock()
462 psock = kcm->tx_psock; in reserve_psock()
464 WARN_ON(kcm->tx_wait); in reserve_psock()
465 spin_unlock_bh(&mux->lock); in reserve_psock()
466 return kcm->tx_psock; in reserve_psock()
469 if (!list_empty(&mux->psocks_avail)) { in reserve_psock()
470 psock = list_first_entry(&mux->psocks_avail, in reserve_psock()
473 list_del(&psock->psock_avail_list); in reserve_psock()
474 if (kcm->tx_wait) { in reserve_psock()
475 list_del(&kcm->wait_psock_list); in reserve_psock()
476 kcm->tx_wait = false; in reserve_psock()
478 kcm->tx_psock = psock; in reserve_psock()
479 psock->tx_kcm = kcm; in reserve_psock()
480 KCM_STATS_INCR(psock->stats.reserved); in reserve_psock()
481 } else if (!kcm->tx_wait) { in reserve_psock()
482 list_add_tail(&kcm->wait_psock_list, in reserve_psock()
483 &mux->kcm_tx_waiters); in reserve_psock()
484 kcm->tx_wait = true; in reserve_psock()
487 spin_unlock_bh(&mux->lock); in reserve_psock()
492 /* mux lock held */
495 struct kcm_mux *mux = psock->mux; in psock_now_avail() local
498 if (list_empty(&mux->kcm_tx_waiters)) { in psock_now_avail()
499 list_add_tail(&psock->psock_avail_list, in psock_now_avail()
500 &mux->psocks_avail); in psock_now_avail()
502 kcm = list_first_entry(&mux->kcm_tx_waiters, in psock_now_avail()
505 list_del(&kcm->wait_psock_list); in psock_now_avail()
506 kcm->tx_wait = false; in psock_now_avail()
507 psock->tx_kcm = kcm; in psock_now_avail()
514 kcm->tx_psock = psock; in psock_now_avail()
515 KCM_STATS_INCR(psock->stats.reserved); in psock_now_avail()
516 queue_work(kcm_wq, &kcm->tx_work); in psock_now_avail()
520 /* kcm sock is locked. */
524 struct kcm_mux *mux = kcm->mux; in unreserve_psock() local
526 spin_lock_bh(&mux->lock); in unreserve_psock()
528 psock = kcm->tx_psock; in unreserve_psock()
531 spin_unlock_bh(&mux->lock); in unreserve_psock()
537 kcm_update_tx_mux_stats(mux, psock); in unreserve_psock()
539 WARN_ON(kcm->tx_wait); in unreserve_psock()
541 kcm->tx_psock = NULL; in unreserve_psock()
542 psock->tx_kcm = NULL; in unreserve_psock()
543 KCM_STATS_INCR(psock->stats.unreserved); in unreserve_psock()
545 if (unlikely(psock->tx_stopped)) { in unreserve_psock()
546 if (psock->done) { in unreserve_psock()
548 list_del(&psock->psock_list); in unreserve_psock()
549 mux->psocks_cnt--; in unreserve_psock()
550 sock_put(psock->sk); in unreserve_psock()
551 fput(psock->sk->sk_socket->file); in unreserve_psock()
557 spin_unlock_bh(&mux->lock); in unreserve_psock()
564 spin_unlock_bh(&mux->lock); in unreserve_psock()
569 struct kcm_mux *mux = kcm->mux; in kcm_report_tx_retry() local
571 spin_lock_bh(&mux->lock); in kcm_report_tx_retry()
572 KCM_STATS_INCR(mux->stats.tx_retries); in kcm_report_tx_retry()
573 spin_unlock_bh(&mux->lock); in kcm_report_tx_retry()
581 struct sock *sk = &kcm->sk; in kcm_write_msgs()
589 kcm->tx_wait_more = false; in kcm_write_msgs()
590 psock = kcm->tx_psock; in kcm_write_msgs()
591 if (unlikely(psock && psock->tx_stopped)) { in kcm_write_msgs()
597 if (skb_queue_empty(&sk->sk_write_queue)) in kcm_write_msgs()
600 kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0; in kcm_write_msgs()
602 } else if (skb_queue_empty(&sk->sk_write_queue)) { in kcm_write_msgs()
606 head = skb_peek(&sk->sk_write_queue); in kcm_write_msgs()
609 if (txm->sent) { in kcm_write_msgs()
612 ret = -EINVAL; in kcm_write_msgs()
615 sent = txm->sent; in kcm_write_msgs()
616 frag_offset = txm->frag_offset; in kcm_write_msgs()
617 fragidx = txm->fragidx; in kcm_write_msgs()
618 skb = txm->frag_skb; in kcm_write_msgs()
634 if (WARN_ON(!skb_shinfo(skb)->nr_frags)) { in kcm_write_msgs()
635 ret = -EINVAL; in kcm_write_msgs()
639 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; in kcm_write_msgs()
645 frag = &skb_shinfo(skb)->frags[fragidx]; in kcm_write_msgs()
647 ret = -EINVAL; in kcm_write_msgs()
651 ret = kernel_sendpage(psock->sk->sk_socket, in kcm_write_msgs()
654 skb_frag_size(frag) - frag_offset, in kcm_write_msgs()
657 if (ret == -EAGAIN) { in kcm_write_msgs()
661 txm->sent = sent; in kcm_write_msgs()
662 txm->frag_offset = frag_offset; in kcm_write_msgs()
663 txm->fragidx = fragidx; in kcm_write_msgs()
664 txm->frag_skb = skb; in kcm_write_msgs()
675 kcm_abort_tx_psock(psock, ret ? -ret : EPIPE, in kcm_write_msgs()
679 txm->sent = 0; in kcm_write_msgs()
688 KCM_STATS_ADD(psock->stats.tx_bytes, ret); in kcm_write_msgs()
697 skb = skb_shinfo(skb)->frag_list; in kcm_write_msgs()
700 } else if (skb->next) { in kcm_write_msgs()
701 skb = skb->next; in kcm_write_msgs()
706 skb_dequeue(&sk->sk_write_queue); in kcm_write_msgs()
708 sk->sk_wmem_queued -= sent; in kcm_write_msgs()
710 KCM_STATS_INCR(psock->stats.tx_msgs); in kcm_write_msgs()
711 } while ((head = skb_peek(&sk->sk_write_queue))); in kcm_write_msgs()
715 WARN_ON(!skb_queue_empty(&sk->sk_write_queue)); in kcm_write_msgs()
720 sk->sk_write_space(sk); in kcm_write_msgs()
728 struct sock *sk = &kcm->sk; in kcm_tx_work()
740 report_csk_error(&kcm->sk, -err); in kcm_tx_work()
745 if (likely(sk->sk_socket) && in kcm_tx_work()
746 test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { in kcm_tx_work()
747 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in kcm_tx_work()
748 sk->sk_write_space(sk); in kcm_tx_work()
757 if (kcm->tx_wait_more) in kcm_push()
765 struct sock *sk = sock->sk; in kcm_sendpage()
783 err = -EPIPE; in kcm_sendpage()
784 if (sk->sk_err) in kcm_sendpage()
787 if (kcm->seq_skb) { in kcm_sendpage()
789 head = kcm->seq_skb; in kcm_sendpage()
790 skb = kcm_tx_msg(head)->last_skb; in kcm_sendpage()
791 i = skb_shinfo(skb)->nr_frags; in kcm_sendpage()
794 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); in kcm_sendpage()
795 skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; in kcm_sendpage()
802 tskb = alloc_skb(0, sk->sk_allocation); in kcm_sendpage()
811 skb_shinfo(head)->frag_list = tskb; in kcm_sendpage()
813 skb->next = tskb; in kcm_sendpage()
816 skb->ip_summed = CHECKSUM_UNNECESSARY; in kcm_sendpage()
823 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in kcm_sendpage()
829 head = alloc_skb(0, sk->sk_allocation); in kcm_sendpage()
843 skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; in kcm_sendpage()
846 skb->len += size; in kcm_sendpage()
847 skb->data_len += size; in kcm_sendpage()
848 skb->truesize += size; in kcm_sendpage()
849 sk->sk_wmem_queued += size; in kcm_sendpage()
853 head->len += size; in kcm_sendpage()
854 head->data_len += size; in kcm_sendpage()
855 head->truesize += size; in kcm_sendpage()
859 bool not_busy = skb_queue_empty(&sk->sk_write_queue); in kcm_sendpage()
862 __skb_queue_tail(&sk->sk_write_queue, head); in kcm_sendpage()
863 kcm->seq_skb = NULL; in kcm_sendpage()
864 KCM_STATS_INCR(kcm->stats.tx_msgs); in kcm_sendpage()
867 kcm->tx_wait_more = true; in kcm_sendpage()
868 } else if (kcm->tx_wait_more || not_busy) { in kcm_sendpage()
877 report_csk_error(&kcm->sk, -err); in kcm_sendpage()
882 kcm->seq_skb = head; in kcm_sendpage()
883 kcm_tx_msg(head)->last_skb = skb; in kcm_sendpage()
886 KCM_STATS_ADD(kcm->stats.tx_bytes, size); in kcm_sendpage()
897 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN)) in kcm_sendpage()
898 sk->sk_write_space(sk); in kcm_sendpage()
906 struct sock *sk = sock->sk; in kcm_sendmsg()
910 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); in kcm_sendmsg()
911 int eor = (sock->type == SOCK_DGRAM) ? in kcm_sendmsg()
912 !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR); in kcm_sendmsg()
913 int err = -EPIPE; in kcm_sendmsg()
920 if (sk->sk_err) in kcm_sendmsg()
923 if (kcm->seq_skb) { in kcm_sendmsg()
925 head = kcm->seq_skb; in kcm_sendmsg()
926 skb = kcm_tx_msg(head)->last_skb; in kcm_sendmsg()
933 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in kcm_sendmsg()
941 head = alloc_skb(0, sk->sk_allocation); in kcm_sendmsg()
948 head = alloc_skb(0, sk->sk_allocation); in kcm_sendmsg()
956 skb->ip_summed = CHECKSUM_UNNECESSARY; in kcm_sendmsg()
962 int i = skb_shinfo(skb)->nr_frags; in kcm_sendmsg()
968 if (!skb_can_coalesce(skb, i, pfrag->page, in kcm_sendmsg()
969 pfrag->offset)) { in kcm_sendmsg()
973 tskb = alloc_skb(0, sk->sk_allocation); in kcm_sendmsg()
978 skb_shinfo(head)->frag_list = tskb; in kcm_sendmsg()
980 skb->next = tskb; in kcm_sendmsg()
983 skb->ip_summed = CHECKSUM_UNNECESSARY; in kcm_sendmsg()
990 pfrag->size - pfrag->offset); in kcm_sendmsg()
995 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, in kcm_sendmsg()
996 pfrag->page, in kcm_sendmsg()
997 pfrag->offset, in kcm_sendmsg()
1004 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); in kcm_sendmsg()
1006 skb_fill_page_desc(skb, i, pfrag->page, in kcm_sendmsg()
1007 pfrag->offset, copy); in kcm_sendmsg()
1008 get_page(pfrag->page); in kcm_sendmsg()
1011 pfrag->offset += copy; in kcm_sendmsg()
1014 head->len += copy; in kcm_sendmsg()
1015 head->data_len += copy; in kcm_sendmsg()
1028 bool not_busy = skb_queue_empty(&sk->sk_write_queue); in kcm_sendmsg()
1032 __skb_queue_tail(&sk->sk_write_queue, head); in kcm_sendmsg()
1033 kcm->seq_skb = NULL; in kcm_sendmsg()
1034 KCM_STATS_INCR(kcm->stats.tx_msgs); in kcm_sendmsg()
1037 if (msg->msg_flags & MSG_BATCH) { in kcm_sendmsg()
1038 kcm->tx_wait_more = true; in kcm_sendmsg()
1039 } else if (kcm->tx_wait_more || not_busy) { in kcm_sendmsg()
1048 report_csk_error(&kcm->sk, -err); in kcm_sendmsg()
1055 kcm->seq_skb = head; in kcm_sendmsg()
1056 kcm_tx_msg(head)->last_skb = skb; in kcm_sendmsg()
1060 KCM_STATS_ADD(kcm->stats.tx_bytes, copied); in kcm_sendmsg()
1068 if (copied && sock->type == SOCK_SEQPACKET) { in kcm_sendmsg()
1075 if (head != kcm->seq_skb) in kcm_sendmsg()
1078 err = sk_stream_error(sk, msg->msg_flags, err); in kcm_sendmsg()
1081 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN)) in kcm_sendmsg()
1082 sk->sk_write_space(sk); in kcm_sendmsg()
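Seen from userspace, the tx_wait_more handling above gives MSG_BATCH its meaning: a batched send may be held back by the mux until a send without the flag flushes the queue. A hedged sketch (helper name and fds hypothetical; the MSG_BATCH fallback value is the one in linux/socket.h):

	#include <sys/types.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	#ifndef MSG_BATCH
	#define MSG_BATCH 0x40000                  /* more messages coming */
	#endif

	ssize_t kcm_send_two(int kcm_fd, void *a, size_t alen, void *b, size_t blen)
	{
		struct iovec iov = { .iov_base = a, .iov_len = alen };
		struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

		/* queued as one complete message, transmission may be deferred */
		if (sendmsg(kcm_fd, &msg, MSG_BATCH) < 0)
			return -1;

		iov.iov_base = b;
		iov.iov_len = blen;
		return sendmsg(kcm_fd, &msg, 0);   /* no MSG_BATCH: flushes the batch */
	}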
1091 struct sock *sk = sock->sk; in kcm_recvmsg()
1106 if (len > stm->full_len) in kcm_recvmsg()
1107 len = stm->full_len; in kcm_recvmsg()
1109 err = skb_copy_datagram_msg(skb, stm->offset, msg, len); in kcm_recvmsg()
1115 KCM_STATS_ADD(kcm->stats.rx_bytes, copied); in kcm_recvmsg()
1116 if (copied < stm->full_len) { in kcm_recvmsg()
1117 if (sock->type == SOCK_DGRAM) { in kcm_recvmsg()
1119 msg->msg_flags |= MSG_TRUNC; in kcm_recvmsg()
1122 stm->offset += copied; in kcm_recvmsg()
1123 stm->full_len -= copied; in kcm_recvmsg()
1127 msg->msg_flags |= MSG_EOR; in kcm_recvmsg()
1128 KCM_STATS_INCR(kcm->stats.rx_msgs); in kcm_recvmsg()
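Each successful recvmsg() on a KCM socket returns at most one parsed message; per the SOCK_DGRAM branch above, a short buffer drops the message tail and raises MSG_TRUNC. A usage sketch (helper name hypothetical):

	#include <sys/types.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	ssize_t kcm_recv_one(int kcm_fd, char *buf, size_t len, int *truncated)
	{
		struct iovec iov = { .iov_base = buf, .iov_len = len };
		struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
		ssize_t n = recvmsg(kcm_fd, &msg, 0);

		/* on SOCK_DGRAM an oversized message is cut short, not kept */
		if (n >= 0)
			*truncated = !!(msg.msg_flags & MSG_TRUNC);
		return n;
	}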
1141 struct sock *sk = sock->sk; in kcm_splice_read()
1158 if (len > stm->full_len) in kcm_splice_read()
1159 len = stm->full_len; in kcm_splice_read()
1161 copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags); in kcm_splice_read()
1167 KCM_STATS_ADD(kcm->stats.rx_bytes, copied); in kcm_splice_read()
1169 stm->offset += copied; in kcm_splice_read()
1170 stm->full_len -= copied; in kcm_splice_read()
1189 struct kcm_mux *mux = kcm->mux; in kcm_recv_disable() local
1191 if (kcm->rx_disabled) in kcm_recv_disable()
1194 spin_lock_bh(&mux->rx_lock); in kcm_recv_disable()
1196 kcm->rx_disabled = 1; in kcm_recv_disable()
1199 if (!kcm->rx_psock) { in kcm_recv_disable()
1200 if (kcm->rx_wait) { in kcm_recv_disable()
1201 list_del(&kcm->wait_rx_list); in kcm_recv_disable()
1203 WRITE_ONCE(kcm->rx_wait, false); in kcm_recv_disable()
1206 requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue); in kcm_recv_disable()
1209 spin_unlock_bh(&mux->rx_lock); in kcm_recv_disable()
1215 struct kcm_mux *mux = kcm->mux; in kcm_recv_enable() local
1217 if (!kcm->rx_disabled) in kcm_recv_enable()
1220 spin_lock_bh(&mux->rx_lock); in kcm_recv_enable()
1222 kcm->rx_disabled = 0; in kcm_recv_enable()
1225 spin_unlock_bh(&mux->rx_lock); in kcm_recv_enable()
1231 struct kcm_sock *kcm = kcm_sk(sock->sk); in kcm_setsockopt()
1236 return -ENOPROTOOPT; in kcm_setsockopt()
1239 return -EINVAL; in kcm_setsockopt()
1242 return -EFAULT; in kcm_setsockopt()
1248 lock_sock(&kcm->sk); in kcm_setsockopt()
1253 release_sock(&kcm->sk); in kcm_setsockopt()
1256 err = -ENOPROTOOPT; in kcm_setsockopt()
1265 struct kcm_sock *kcm = kcm_sk(sock->sk); in kcm_getsockopt()
1269 return -ENOPROTOOPT; in kcm_getsockopt()
1272 return -EFAULT; in kcm_getsockopt()
1276 return -EINVAL; in kcm_getsockopt()
1280 val = kcm->rx_disabled; in kcm_getsockopt()
1283 return -ENOPROTOOPT; in kcm_getsockopt()
1287 return -EFAULT; in kcm_getsockopt()
1289 return -EFAULT; in kcm_getsockopt()
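KCM_RECV_DISABLE on SOL_KCM is the only option these handlers accept. A hedged usage sketch (helper name hypothetical; the SOL_KCM fallback value matches linux/socket.h):

	#include <sys/socket.h>
	#include <linux/kcm.h>

	#ifndef SOL_KCM
	#define SOL_KCM 281
	#endif

	/* on != 0 steers incoming messages away from this KCM socket;
	 * on == 0 re-enables receive (kcm_recv_enable() above) */
	int kcm_recv_disable(int kcm_fd, int on)
	{
		return setsockopt(kcm_fd, SOL_KCM, KCM_RECV_DISABLE, &on, sizeof(on));
	}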
1293 static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux) in init_kcm_sock() argument
1303 kcm->sk.sk_state = TCP_ESTABLISHED; in init_kcm_sock()
1305 /* Add to mux's kcm sockets list */ in init_kcm_sock()
1306 kcm->mux = mux; in init_kcm_sock()
1307 spin_lock_bh(&mux->lock); in init_kcm_sock()
1309 head = &mux->kcm_socks; in init_kcm_sock()
1310 list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) { in init_kcm_sock()
1311 if (tkcm->index != index) in init_kcm_sock()
1313 head = &tkcm->kcm_sock_list; in init_kcm_sock()
1317 list_add(&kcm->kcm_sock_list, head); in init_kcm_sock()
1318 kcm->index = index; in init_kcm_sock()
1320 mux->kcm_socks_cnt++; in init_kcm_sock()
1321 spin_unlock_bh(&mux->lock); in init_kcm_sock()
1323 INIT_WORK(&kcm->tx_work, kcm_tx_work); in init_kcm_sock()
1325 spin_lock_bh(&mux->rx_lock); in init_kcm_sock()
1327 spin_unlock_bh(&mux->rx_lock); in init_kcm_sock()
1333 struct kcm_sock *kcm = kcm_sk(sock->sk); in kcm_attach()
1334 struct kcm_mux *mux = kcm->mux; in kcm_attach() local
1346 csk = csock->sk; in kcm_attach()
1348 return -EINVAL; in kcm_attach()
1353 if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) || in kcm_attach()
1354 csk->sk_protocol != IPPROTO_TCP) { in kcm_attach()
1355 err = -EOPNOTSUPP; in kcm_attach()
1360 if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) { in kcm_attach()
1361 err = -EOPNOTSUPP; in kcm_attach()
1367 err = -ENOMEM; in kcm_attach()
1371 psock->mux = mux; in kcm_attach()
1372 psock->sk = csk; in kcm_attach()
1373 psock->bpf_prog = prog; in kcm_attach()
1375 write_lock_bh(&csk->sk_callback_lock); in kcm_attach()
1380 if (csk->sk_user_data) { in kcm_attach()
1381 write_unlock_bh(&csk->sk_callback_lock); in kcm_attach()
1383 err = -EALREADY; in kcm_attach()
1387 err = strp_init(&psock->strp, csk, &cb); in kcm_attach()
1389 write_unlock_bh(&csk->sk_callback_lock); in kcm_attach()
1394 psock->save_data_ready = csk->sk_data_ready; in kcm_attach()
1395 psock->save_write_space = csk->sk_write_space; in kcm_attach()
1396 psock->save_state_change = csk->sk_state_change; in kcm_attach()
1397 csk->sk_user_data = psock; in kcm_attach()
1398 csk->sk_data_ready = psock_data_ready; in kcm_attach()
1399 csk->sk_write_space = psock_write_space; in kcm_attach()
1400 csk->sk_state_change = psock_state_change; in kcm_attach()
1402 write_unlock_bh(&csk->sk_callback_lock); in kcm_attach()
1406 /* Finished initialization, now add the psock to the MUX. */ in kcm_attach()
1407 spin_lock_bh(&mux->lock); in kcm_attach()
1408 head = &mux->psocks; in kcm_attach()
1409 list_for_each_entry(tpsock, &mux->psocks, psock_list) { in kcm_attach()
1410 if (tpsock->index != index) in kcm_attach()
1412 head = &tpsock->psock_list; in kcm_attach()
1416 list_add(&psock->psock_list, head); in kcm_attach()
1417 psock->index = index; in kcm_attach()
1419 KCM_STATS_INCR(mux->stats.psock_attach); in kcm_attach()
1420 mux->psocks_cnt++; in kcm_attach()
1422 spin_unlock_bh(&mux->lock); in kcm_attach()
1425 strp_check_rcv(&psock->strp); in kcm_attach()
1439 csock = sockfd_lookup(info->fd, &err); in kcm_attach_ioctl()
1441 return -ENOENT; in kcm_attach_ioctl()
1443 prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER); in kcm_attach_ioctl()
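A hedged userspace sketch of the attach path handled above (fd names are placeholders): SIOCKCMATTACH hands the mux a connected TCP socket plus a loaded BPF_PROG_TYPE_SOCKET_FILTER program whose return value frames incoming messages.

	#include <sys/ioctl.h>
	#include <linux/sockios.h>
	#include <linux/kcm.h>

	int kcm_attach_tcp(int kcm_fd, int tcp_fd, int bpf_fd)
	{
		struct kcm_attach info = {
			.fd     = tcp_fd,          /* connected TCP socket to absorb */
			.bpf_fd = bpf_fd,          /* parser returning message length */
		};

		return ioctl(kcm_fd, SIOCKCMATTACH, &info);
	}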
1465 struct sock *csk = psock->sk; in kcm_unattach()
1466 struct kcm_mux *mux = psock->mux; in kcm_unattach() local
1473 write_lock_bh(&csk->sk_callback_lock); in kcm_unattach()
1474 csk->sk_user_data = NULL; in kcm_unattach()
1475 csk->sk_data_ready = psock->save_data_ready; in kcm_unattach()
1476 csk->sk_write_space = psock->save_write_space; in kcm_unattach()
1477 csk->sk_state_change = psock->save_state_change; in kcm_unattach()
1478 strp_stop(&psock->strp); in kcm_unattach()
1480 if (WARN_ON(psock->rx_kcm)) { in kcm_unattach()
1481 write_unlock_bh(&csk->sk_callback_lock); in kcm_unattach()
1486 spin_lock_bh(&mux->rx_lock); in kcm_unattach()
1491 if (psock->ready_rx_msg) { in kcm_unattach()
1492 list_del(&psock->psock_ready_list); in kcm_unattach()
1493 kfree_skb(psock->ready_rx_msg); in kcm_unattach()
1494 psock->ready_rx_msg = NULL; in kcm_unattach()
1495 KCM_STATS_INCR(mux->stats.rx_ready_drops); in kcm_unattach()
1498 spin_unlock_bh(&mux->rx_lock); in kcm_unattach()
1500 write_unlock_bh(&csk->sk_callback_lock); in kcm_unattach()
1504 strp_done(&psock->strp); in kcm_unattach()
1507 bpf_prog_put(psock->bpf_prog); in kcm_unattach()
1509 spin_lock_bh(&mux->lock); in kcm_unattach()
1511 aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats); in kcm_unattach()
1512 save_strp_stats(&psock->strp, &mux->aggregate_strp_stats); in kcm_unattach()
1514 KCM_STATS_INCR(mux->stats.psock_unattach); in kcm_unattach()
1516 if (psock->tx_kcm) { in kcm_unattach()
1521 KCM_STATS_INCR(mux->stats.psock_unattach_rsvd); in kcm_unattach()
1522 spin_unlock_bh(&mux->lock); in kcm_unattach()
1526 * to do this without the mux lock. in kcm_unattach()
1530 spin_lock_bh(&mux->lock); in kcm_unattach()
1531 if (!psock->tx_kcm) { in kcm_unattach()
1532 /* psock became unreserved in the window when the mux was unlocked */ in kcm_unattach()
1535 psock->done = 1; in kcm_unattach()
1540 /* Queue tx work to make sure psock->done is handled */ in kcm_unattach()
1541 queue_work(kcm_wq, &psock->tx_kcm->tx_work); in kcm_unattach()
1542 spin_unlock_bh(&mux->lock); in kcm_unattach()
1545 if (!psock->tx_stopped) in kcm_unattach()
1546 list_del(&psock->psock_avail_list); in kcm_unattach()
1547 list_del(&psock->psock_list); in kcm_unattach()
1548 mux->psocks_cnt--; in kcm_unattach()
1549 spin_unlock_bh(&mux->lock); in kcm_unattach()
1552 fput(csk->sk_socket->file); in kcm_unattach()
1561 struct kcm_sock *kcm = kcm_sk(sock->sk); in kcm_unattach_ioctl()
1562 struct kcm_mux *mux = kcm->mux; in kcm_unattach_ioctl() local
1568 csock = sockfd_lookup(info->fd, &err); in kcm_unattach_ioctl()
1570 return -ENOENT; in kcm_unattach_ioctl()
1572 csk = csock->sk; in kcm_unattach_ioctl()
1574 err = -EINVAL; in kcm_unattach_ioctl()
1578 err = -ENOENT; in kcm_unattach_ioctl()
1580 spin_lock_bh(&mux->lock); in kcm_unattach_ioctl()
1582 list_for_each_entry(psock, &mux->psocks, psock_list) { in kcm_unattach_ioctl()
1583 if (psock->sk != csk) in kcm_unattach_ioctl()
1588 if (psock->unattaching || WARN_ON(psock->done)) { in kcm_unattach_ioctl()
1589 err = -EALREADY; in kcm_unattach_ioctl()
1593 psock->unattaching = 1; in kcm_unattach_ioctl()
1595 spin_unlock_bh(&mux->lock); in kcm_unattach_ioctl()
1604 spin_unlock_bh(&mux->lock); in kcm_unattach_ioctl()
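The reverse operation, as a sketch (names hypothetical): SIOCKCMUNATTACH walks mux->psocks as shown above and fails with EALREADY if the psock is already being torn down.

	#include <sys/ioctl.h>
	#include <linux/sockios.h>
	#include <linux/kcm.h>

	int kcm_detach_tcp(int kcm_fd, int tcp_fd)
	{
		struct kcm_unattach info = { .fd = tcp_fd };

		return ioctl(kcm_fd, SIOCKCMUNATTACH, &info);
	}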
1625 return ERR_PTR(-ENFILE); in kcm_clone()
1627 newsock->type = osock->type; in kcm_clone()
1628 newsock->ops = osock->ops; in kcm_clone()
1630 __module_get(newsock->ops->owner); in kcm_clone()
1632 newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL, in kcm_clone()
1636 return ERR_PTR(-ENOMEM); in kcm_clone()
1639 init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux); in kcm_clone()
1641 return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name); in kcm_clone()
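kcm_clone() above backs SIOCKCMCLONE, which opens a second KCM socket on the same mux; the kernel writes the new descriptor back into the struct. A usage sketch (helper name hypothetical):

	#include <sys/ioctl.h>
	#include <linux/sockios.h>
	#include <linux/kcm.h>

	int kcm_clone_sock(int kcm_fd)
	{
		struct kcm_clone info = { 0 };

		if (ioctl(kcm_fd, SIOCKCMCLONE, &info) < 0)
			return -1;
		return info.fd;                    /* fd of the new KCM socket */
	}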
1653 return -EFAULT; in kcm_ioctl()
1663 return -EFAULT; in kcm_ioctl()
1686 return -EFAULT; in kcm_ioctl()
1693 err = -ENOIOCTLCMD; in kcm_ioctl()
1702 struct kcm_mux *mux = container_of(rcu, in free_mux() local
1705 kmem_cache_free(kcm_muxp, mux); in free_mux()
1708 static void release_mux(struct kcm_mux *mux) in release_mux() argument
1710 struct kcm_net *knet = mux->knet; in release_mux()
1715 &mux->psocks, psock_list) { in release_mux()
1716 if (!WARN_ON(psock->unattaching)) in release_mux()
1720 if (WARN_ON(mux->psocks_cnt)) in release_mux()
1723 __skb_queue_purge(&mux->rx_hold_queue); in release_mux()
1725 mutex_lock(&knet->mutex); in release_mux()
1726 aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats); in release_mux()
1727 aggregate_psock_stats(&mux->aggregate_psock_stats, in release_mux()
1728 &knet->aggregate_psock_stats); in release_mux()
1729 aggregate_strp_stats(&mux->aggregate_strp_stats, in release_mux()
1730 &knet->aggregate_strp_stats); in release_mux()
1731 list_del_rcu(&mux->kcm_mux_list); in release_mux()
1732 knet->count--; in release_mux()
1733 mutex_unlock(&knet->mutex); in release_mux()
1735 call_rcu(&mux->rcu, free_mux); in release_mux()
1740 struct kcm_mux *mux = kcm->mux; in kcm_done() local
1741 struct sock *sk = &kcm->sk; in kcm_done()
1744 spin_lock_bh(&mux->rx_lock); in kcm_done()
1745 if (kcm->rx_psock) { in kcm_done()
1747 WARN_ON(kcm->done); in kcm_done()
1748 kcm->rx_disabled = 1; in kcm_done()
1749 kcm->done = 1; in kcm_done()
1750 spin_unlock_bh(&mux->rx_lock); in kcm_done()
1754 if (kcm->rx_wait) { in kcm_done()
1755 list_del(&kcm->wait_rx_list); in kcm_done()
1757 WRITE_ONCE(kcm->rx_wait, false); in kcm_done()
1760 requeue_rx_msgs(mux, &sk->sk_receive_queue); in kcm_done()
1762 spin_unlock_bh(&mux->rx_lock); in kcm_done()
1767 /* Detach from MUX */ in kcm_done()
1768 spin_lock_bh(&mux->lock); in kcm_done()
1770 list_del(&kcm->kcm_sock_list); in kcm_done()
1771 mux->kcm_socks_cnt--; in kcm_done()
1772 socks_cnt = mux->kcm_socks_cnt; in kcm_done()
1774 spin_unlock_bh(&mux->lock); in kcm_done()
1777 /* We are done with the mux now. */ in kcm_done()
1778 release_mux(mux); in kcm_done()
1781 WARN_ON(kcm->rx_wait); in kcm_done()
1783 sock_put(&kcm->sk); in kcm_done()
1787 * If this is the last KCM socket on the MUX, destroy the MUX.
1791 struct sock *sk = sock->sk; in kcm_release()
1793 struct kcm_mux *mux; in kcm_release() local
1800 mux = kcm->mux; in kcm_release()
1804 kfree_skb(kcm->seq_skb); in kcm_release()
1810 __skb_queue_purge(&sk->sk_write_queue); in kcm_release()
1816 kcm->tx_stopped = 1; in kcm_release()
1820 spin_lock_bh(&mux->lock); in kcm_release()
1821 if (kcm->tx_wait) { in kcm_release()
1825 list_del(&kcm->wait_psock_list); in kcm_release()
1826 kcm->tx_wait = false; in kcm_release()
1828 spin_unlock_bh(&mux->lock); in kcm_release()
1833 cancel_work_sync(&kcm->tx_work); in kcm_release()
1836 psock = kcm->tx_psock; in kcm_release()
1847 WARN_ON(kcm->tx_wait); in kcm_release()
1848 WARN_ON(kcm->tx_psock); in kcm_release()
1850 sock->sk = NULL; in kcm_release()
1906 struct kcm_mux *mux; in kcm_create() local
1908 switch (sock->type) { in kcm_create()
1910 sock->ops = &kcm_dgram_ops; in kcm_create()
1913 sock->ops = &kcm_seqpacket_ops; in kcm_create()
1916 return -ESOCKTNOSUPPORT; in kcm_create()
1920 return -EPROTONOSUPPORT; in kcm_create()
1924 return -ENOMEM; in kcm_create()
1926 /* Allocate a kcm mux, shared between KCM sockets */ in kcm_create()
1927 mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL); in kcm_create()
1928 if (!mux) { in kcm_create()
1930 return -ENOMEM; in kcm_create()
1933 spin_lock_init(&mux->lock); in kcm_create()
1934 spin_lock_init(&mux->rx_lock); in kcm_create()
1935 INIT_LIST_HEAD(&mux->kcm_socks); in kcm_create()
1936 INIT_LIST_HEAD(&mux->kcm_rx_waiters); in kcm_create()
1937 INIT_LIST_HEAD(&mux->kcm_tx_waiters); in kcm_create()
1939 INIT_LIST_HEAD(&mux->psocks); in kcm_create()
1940 INIT_LIST_HEAD(&mux->psocks_ready); in kcm_create()
1941 INIT_LIST_HEAD(&mux->psocks_avail); in kcm_create()
1943 mux->knet = knet; in kcm_create()
1945 /* Add new MUX to list */ in kcm_create()
1946 mutex_lock(&knet->mutex); in kcm_create()
1947 list_add_rcu(&mux->kcm_mux_list, &knet->mux_list); in kcm_create()
1948 knet->count++; in kcm_create()
1949 mutex_unlock(&knet->mutex); in kcm_create()
1951 skb_queue_head_init(&mux->rx_hold_queue); in kcm_create()
1955 init_kcm_sock(kcm_sk(sk), mux); in kcm_create()
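For context, creating a KCM socket is what reaches kcm_create() above and, on first use, allocates the mux the KCM sockets share. A minimal sketch (assumes kernel KCM support; the AF_KCM fallback value matches linux/socket.h):

	#include <sys/socket.h>
	#include <linux/kcm.h>

	#ifndef AF_KCM
	#define AF_KCM 41
	#endif

	int kcm_socket(void)
	{
		/* SOCK_DGRAM selects kcm_dgram_ops; SOCK_SEQPACKET is the other
		 * supported type, anything else gets -ESOCKTNOSUPPORT */
		return socket(AF_KCM, SOCK_DGRAM, KCMPROTO_CONNECTED);
	}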
1970 INIT_LIST_HEAD_RCU(&knet->mux_list); in kcm_init_net()
1971 mutex_init(&knet->mutex); in kcm_init_net()
1983 WARN_ON(!list_empty(&knet->mux_list)); in kcm_exit_net()
1995 int err = -ENOMEM; in kcm_init()