/Linux-v5.4/net/unix/

garbage.c
      97  spin_lock(&x->sk_receive_queue.lock);  in scan_inflight()
      98  skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {  in scan_inflight()
     125  __skb_unlink(skb, &x->sk_receive_queue);  in scan_inflight()
     130  spin_unlock(&x->sk_receive_queue.lock);  in scan_inflight()
     147  spin_lock(&x->sk_receive_queue.lock);  in scan_children()
     148  skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {  in scan_children()
     157  spin_unlock(&x->sk_receive_queue.lock);  in scan_children()

diag.c
      68  spin_lock(&sk->sk_receive_queue.lock);  in sk_diag_dump_icons()
      71  sk->sk_receive_queue.qlen * sizeof(u32));  in sk_diag_dump_icons()
      77  skb_queue_walk(&sk->sk_receive_queue, skb) {  in sk_diag_dump_icons()
      91  spin_unlock(&sk->sk_receive_queue.lock);  in sk_diag_dump_icons()
      97  spin_unlock(&sk->sk_receive_queue.lock);  in sk_diag_dump_icons()
     106  rql.udiag_rqueue = sk->sk_receive_queue.qlen;  in sk_diag_show_rqlen()

af_unix.c
     194  return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;  in unix_recvq_full()
     473  if (!skb_queue_empty(&sk->sk_receive_queue)) {  in unix_dgram_disconnected()
     474  skb_queue_purge(&sk->sk_receive_queue);  in unix_dgram_disconnected()
     492  skb_queue_purge(&sk->sk_receive_queue);  in unix_sock_destructor()
     545  if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)  in unix_release_sock()
     559  while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {  in unix_release_sock()
    1368  spin_lock(&other->sk_receive_queue.lock);  in unix_stream_connect()
    1369  __skb_queue_tail(&other->sk_receive_queue, skb);  in unix_stream_connect()
    1370  spin_unlock(&other->sk_receive_queue.lock);  in unix_stream_connect()
    1750  skb_queue_tail(&other->sk_receive_queue, skb);  in unix_dgram_sendmsg()
          [all …]

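The garbage.c hits above show the locked-walk idiom: take sk_receive_queue.lock and iterate with skb_queue_walk_safe() so that entries can be unlinked mid-walk. A minimal sketch of that idiom follows; scan_queue() and should_drop() are illustrative names, not kernel APIs, and the hitlist is assumed to be a private, unshared list.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch only: walk a socket's receive queue under its own lock and move
 * matching skbs onto a private list, mirroring net/unix/garbage.c.
 */
static void scan_queue(struct sock *sk, bool (*should_drop)(struct sk_buff *),
		       struct sk_buff_head *hitlist)
{
	struct sk_buff *skb, *next;

	spin_lock(&sk->sk_receive_queue.lock);
	skb_queue_walk_safe(&sk->sk_receive_queue, skb, next) {
		if (should_drop(skb)) {
			/* __skb_unlink() requires the queue lock to be held. */
			__skb_unlink(skb, &sk->sk_receive_queue);
			/* hitlist is private here, so the unlocked tail add is fine. */
			__skb_queue_tail(hitlist, skb);
		}
	}
	spin_unlock(&sk->sk_receive_queue.lock);
}
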
/Linux-v5.4/net/atm/

signaling.c
      33  skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);  in sigd_put_skb()
     113  skb_queue_tail(&sk->sk_receive_queue, skb);  in sigd_send()
     204  if (skb_peek(&sk_atm(vcc)->sk_receive_queue))  in sigd_close()
     206  skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);  in sigd_close()

raw.c
      28  skb_queue_tail(&sk->sk_receive_queue, skb);  in atm_push_raw()

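Both ATM call sites use the plain delivery idiom: skb_queue_tail() appends under the queue's internal lock, and the reader is then woken. A hedged sketch of that shape; deliver_to_sock() is an illustrative name, and real call sites normally also charge the skb against sk_rmem_alloc (for example via sock_queue_rcv_skb()), which is omitted here.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch only: hand an skb to a receiving socket and notify readers. */
static void deliver_to_sock(struct sock *sk, struct sk_buff *skb)
{
	skb_queue_tail(&sk->sk_receive_queue, skb);	/* takes queue->lock internally */
	sk->sk_data_ready(sk);				/* wakes poll()/blocking recvmsg() */
}
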
/Linux-v5.4/net/bluetooth/

af_bluetooth.c
     308  if (!skb_queue_empty(&sk->sk_receive_queue))  in bt_sock_data_wait()
     351  skb = skb_dequeue(&sk->sk_receive_queue);  in bt_sock_stream_recvmsg()
     377  skb_queue_head(&sk->sk_receive_queue, skb);  in bt_sock_stream_recvmsg()
     416  skb_queue_head(&sk->sk_receive_queue, skb);  in bt_sock_stream_recvmsg()
     423  skb_queue_head(&sk->sk_receive_queue, skb);  in bt_sock_stream_recvmsg()
     473  if (!skb_queue_empty_lockless(&sk->sk_receive_queue))  in bt_sock_poll()
     518  skb = skb_peek(&sk->sk_receive_queue);  in bt_sock_ioctl()

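bt_sock_stream_recvmsg() relies on the dequeue-then-requeue idiom for stream data: pop an skb, copy what the caller asked for, and push any unconsumed remainder back onto the head of the queue. A simplified sketch under that assumption; stream_recv_once() is a made-up name and the error handling is trimmed compared with the Bluetooth code.

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <net/sock.h>

/* Sketch only: copy up to @len bytes into @msg, requeueing a partially
 * consumed skb at the head so the next read continues where this one stopped.
 */
static int stream_recv_once(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int chunk;

	skb = skb_dequeue(&sk->sk_receive_queue);
	if (!skb)
		return 0;

	chunk = min_t(unsigned int, skb->len, len);
	if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
		/* Copy to userspace failed: give the skb back untouched. */
		skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	skb_pull(skb, chunk);
	if (skb->len)
		skb_queue_head(&sk->sk_receive_queue, skb);	/* remainder stays queued */
	else
		kfree_skb(skb);

	return chunk;
}
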
/Linux-v5.4/net/sctp/

ulpqueue.c
     138  &sk->sk_receive_queue);  in sctp_clear_pd()
     155  __skb_queue_tail(&sk->sk_receive_queue,  in sctp_clear_pd()
     216  queue = &sk->sk_receive_queue;  in sctp_ulpq_tail_event()
     230  queue = &sk->sk_receive_queue;  in sctp_ulpq_tail_event()
     239  queue = &sk->sk_receive_queue;  in sctp_ulpq_tail_event()
     254  if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {  in sctp_ulpq_tail_event()
    1086  if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {  in sctp_ulpq_renege()
    1130  __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));  in sctp_ulpq_abort_pd()

/Linux-v5.4/net/caif/

caif_socket.c
     128  struct sk_buff_head *list = &sk->sk_receive_queue;  in caif_queue_rcv_skb()
     317  if (!skb_queue_empty(&sk->sk_receive_queue) ||  in caif_stream_data_wait()
     381  skb = skb_dequeue(&sk->sk_receive_queue);  in caif_stream_recvmsg()
     426  skb_queue_head(&sk->sk_receive_queue, skb);  in caif_stream_recvmsg()
     440  skb_queue_head(&sk->sk_receive_queue, skb);  in caif_stream_recvmsg()
     450  skb_queue_head(&sk->sk_receive_queue, skb);  in caif_stream_recvmsg()
     913  spin_lock_bh(&sk->sk_receive_queue.lock);  in caif_release()
     915  spin_unlock_bh(&sk->sk_receive_queue.lock);  in caif_release()
     956  if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||  in caif_poll()

/Linux-v5.4/net/phonet/

datagram.c
      39  skb = skb_peek(&sk->sk_receive_queue);  in pn_ioctl()
      64  skb_queue_purge(&sk->sk_receive_queue);  in pn_destruct()

pep.c
     405  queue = &sk->sk_receive_queue;  in pipe_do_rcv()
     464  skb_queue_purge(&sk->sk_receive_queue);  in pipe_destruct()
     577  skb_queue_tail(&sk->sk_receive_queue, skb);  in pipe_handler_do_rcv()
     684  skb_queue_head(&sk->sk_receive_queue, skb);  in pep_do_rcv()
     935  else if (!skb_queue_empty(&sk->sk_receive_queue))  in pep_ioctl()
     936  answ = skb_peek(&sk->sk_receive_queue)->len;  in pep_ioctl()
    1230  struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);  in pep_read()

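pn_ioctl() and pep_ioctl() answer SIOCINQ by peeking at the first queued skb and reporting its length. A sketch of that pattern, assuming the socket lock is the serialization mechanism; report_next_msg_len() is an illustrative name, and a variant that uses the queue spinlock instead appears with the l2tp hits below.

#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <net/sock.h>

/* Sketch only: SIOCINQ-style query for the size of the next pending message. */
static int report_next_msg_len(struct sock *sk, int __user *argp)
{
	struct sk_buff *skb;
	int answ;

	lock_sock(sk);
	skb = skb_peek(&sk->sk_receive_queue);
	answ = skb ? skb->len : 0;
	release_sock(sk);

	return put_user(answ, argp);
}
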
/Linux-v5.4/net/core/

datagram.c
     100  if (READ_ONCE(sk->sk_receive_queue.prev) != skb)  in __skb_wait_for_more_packets()
     250  struct sk_buff_head *queue = &sk->sk_receive_queue;  in __skb_try_recv_datagram()
     281  } while (READ_ONCE(sk->sk_receive_queue.prev) != *last);  in __skb_try_recv_datagram()
     401  int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,  in skb_kill_datagram()
     780  if (!skb_queue_empty_lockless(&sk->sk_receive_queue))  in datagram_poll()

stream.c
     196  __skb_queue_purge(&sk->sk_receive_queue);  in sk_stream_kill_queues()

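The core datagram code avoids taking the queue lock just to decide whether anything new arrived: sk_receive_queue.prev is the tail of the list, so comparing it (with READ_ONCE()) against the last skb the caller saw reveals whether the queue has grown. A sketch of that check; rxq_grew_since() is a hypothetical helper, and the tail the caller passes in may be the queue head itself when the queue was empty, as in __skb_wait_for_more_packets().

#include <linux/compiler.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch only: lockless "did new data arrive?" test used before sleeping. */
static bool rxq_grew_since(struct sock *sk, struct sk_buff *last_seen)
{
	/* .prev of the sk_buff_head is the most recently queued skb. */
	return READ_ONCE(sk->sk_receive_queue.prev) != last_seen;
}
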
/Linux-v5.4/net/kcm/

kcmproc.c
     119  kcm->sk.sk_receive_queue.qlen,  in kcm_format_sock()
     149  psock->sk->sk_receive_queue.qlen,  in kcm_format_psock()
     167  if (psock->sk->sk_receive_queue.qlen) {  in kcm_format_psock()

kcmsock.c
     190  struct sk_buff_head *list = &sk->sk_receive_queue;  in kcm_queue_rcv_skb()
     331  requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);  in unreserve_rx_kcm()
    1089  while (!(skb = skb_peek(&sk->sk_receive_queue))) {  in kcm_wait_data()
    1161  skb_unlink(skb, &sk->sk_receive_queue);  in kcm_recvmsg()
    1247  requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);  in kcm_recv_disable()
    1801  requeue_rx_msgs(mux, &sk->sk_receive_queue);  in kcm_done()

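kcm_recvmsg() takes the opposite approach from the requeue idiom above: it peeks at the head of the queue, copies in place, and only calls skb_unlink() once the whole message has been consumed, so a short or interrupted read leaves the message where it was. A rough sketch under that assumption; the offset bookkeeping is simplified compared with KCM, which tracks per-message state via the strparser, and the caller is assumed to hold the socket lock.

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <net/sock.h>

/* Sketch only: peek, copy, and unlink the head skb only when fully consumed. */
static int peek_copy_consume(struct sock *sk, struct msghdr *msg,
			     size_t len, int *offset)
{
	struct sk_buff *skb;
	int chunk;

	skb = skb_peek(&sk->sk_receive_queue);
	if (!skb)
		return -EAGAIN;

	chunk = min_t(int, skb->len - *offset, len);
	if (skb_copy_datagram_msg(skb, *offset, msg, chunk))
		return -EFAULT;

	*offset += chunk;
	if (*offset == skb->len) {
		/* Whole message consumed: now it is safe to drop it. */
		skb_unlink(skb, &sk->sk_receive_queue);
		kfree_skb(skb);
		*offset = 0;
	}
	return chunk;
}
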
/Linux-v5.4/drivers/crypto/chelsio/chtls/

chtls_cm.h
     177  __skb_unlink(skb, &sk->sk_receive_queue);  in chtls_free_skb()
     184  __skb_unlink(skb, &sk->sk_receive_queue);  in chtls_kfree_skb()

chtls_io.c
    1431  skb = skb_peek(&sk->sk_receive_queue);  in chtls_pt_recvmsg()
    1488  __skb_unlink(skb, &sk->sk_receive_queue);  in chtls_pt_recvmsg()
    1549  !skb_peek(&sk->sk_receive_queue))  in chtls_pt_recvmsg()
    1588  skb_queue_walk(&sk->sk_receive_queue, skb) {  in peekmsg()
    1705  skb_queue_empty_lockless(&sk->sk_receive_queue) &&  in chtls_recvmsg()
    1736  skb = skb_peek(&sk->sk_receive_queue);  in chtls_recvmsg()
    1849  !skb_peek(&sk->sk_receive_queue))  in chtls_recvmsg()

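chtls, like TCP itself, unlinks skbs with the lockless __skb_unlink() rather than skb_unlink(): for these sockets the receive queue is only touched with the socket lock held, so the queue spinlock is unnecessary. The peekmsg() hit also shows the natural shape of MSG_PEEK on a stream socket: walk the queue and copy without dequeuing. A simplified sketch of that walk; peek_queue() is a made-up name, the socket lock is assumed held, and the per-socket peek offset chtls maintains is omitted.

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <net/sock.h>

/* Sketch only: MSG_PEEK-style copy that leaves the receive queue untouched.
 * Caller is assumed to hold the socket lock.
 */
static int peek_queue(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t copied = 0;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		size_t chunk = min_t(size_t, skb->len, len - copied);

		if (skb_copy_datagram_msg(skb, 0, msg, chunk))
			return -EFAULT;
		copied += chunk;
		if (copied == len)
			break;
	}
	return copied;
}
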
/Linux-v5.4/net/rxrpc/

af_rxrpc.c
     825  spin_lock_bh(&sk->sk_receive_queue.lock);  in rxrpc_shutdown()
     832  spin_unlock_bh(&sk->sk_receive_queue.lock);  in rxrpc_shutdown()
     847  rxrpc_purge_queue(&sk->sk_receive_queue);  in rxrpc_sock_destructor()
     885  spin_lock_bh(&sk->sk_receive_queue.lock);  in rxrpc_release_sock()
     887  spin_unlock_bh(&sk->sk_receive_queue.lock);  in rxrpc_release_sock()
     899  rxrpc_purge_queue(&sk->sk_receive_queue);  in rxrpc_release_sock()

/Linux-v5.4/net/tipc/

socket.c
     238  kfree_skb(__skb_dequeue(&sk->sk_receive_queue));  in tsk_advance_rx_queue()
     267  while ((skb = __skb_dequeue(&sk->sk_receive_queue)))  in tsk_rej_rx_queue()
     521  while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {  in __tipc_shutdown()
     743  if (!skb_queue_empty_lockless(&sk->sk_receive_queue))  in tipc_poll()
     751  if (skb_queue_empty_lockless(&sk->sk_receive_queue))  in tipc_poll()
    1691  if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {  in tipc_wait_for_rcvmsg()
    1704  if (!skb_queue_empty(&sk->sk_receive_queue))  in tipc_wait_for_rcvmsg()
    1761  skb = skb_peek(&sk->sk_receive_queue);  in tipc_recvmsg()
    1871  skb = skb_peek(&sk->sk_receive_queue);  in tipc_recvstream()
    1929  } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);  in tipc_recvstream()
          [all …]

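tsk_advance_rx_queue() and the shutdown path show the drain idiom used when the socket lock already serializes queue access: loop on the unlocked __skb_dequeue() and free each buffer. A minimal sketch; drain_rx_queue() is an illustrative name.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch only: drop everything still sitting in the receive queue.
 * Caller is assumed to own the socket lock; without it, use
 * skb_queue_purge(), which takes the queue spinlock itself.
 */
static void drain_rx_queue(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL)
		kfree_skb(skb);
}
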
/Linux-v5.4/drivers/xen/

pvcalls-back.c
     115  spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);  in pvcalls_conn_back_read()
     116  if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {  in pvcalls_conn_back_read()
     118  spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,  in pvcalls_conn_back_read()
     122  spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);  in pvcalls_conn_back_read()
     147  spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);  in pvcalls_conn_back_read()
     148  if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))  in pvcalls_conn_back_read()
     150  spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);  in pvcalls_conn_back_read()

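pvcalls_conn_back_read() only needs to know whether data is pending, but it takes the queue lock with spin_lock_irqsave() around the check rather than trusting an unlocked read. A sketch of that guard; rx_data_pending() is an illustrative name, and skb_queue_empty_lockless() is the cheaper alternative when a slightly stale answer is acceptable.

#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <net/sock.h>

/* Sketch only: check for pending receive data under the queue lock. */
static bool rx_data_pending(struct sock *sk)
{
	unsigned long flags;
	bool pending;

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	pending = !skb_queue_empty(&sk->sk_receive_queue);
	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);

	return pending;
}
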
/Linux-v5.4/net/nfc/

llcp_sock.c
     561  if (!skb_queue_empty_lockless(&sk->sk_receive_queue))  in llcp_sock_poll()
     815  skb_queue_empty(&sk->sk_receive_queue)) {  in llcp_sock_recvmsg()
     842  skb_queue_head(&sk->sk_receive_queue, skb);  in llcp_sock_recvmsg()
     873  skb_queue_head(&sk->sk_receive_queue, skb);  in llcp_sock_recvmsg()
     940  skb_queue_purge(&sk->sk_receive_queue);  in llcp_sock_destruct()

/Linux-v5.4/net/l2tp/

l2tp_ip.c
     568  spin_lock_bh(&sk->sk_receive_queue.lock);  in l2tp_ioctl()
     569  skb = skb_peek(&sk->sk_receive_queue);  in l2tp_ioctl()
     571  spin_unlock_bh(&sk->sk_receive_queue.lock);  in l2tp_ioctl()

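l2tp_ioctl() answers the same pending-bytes question as the phonet handlers above, but serializes with the queue's own lock (bottom halves disabled) instead of the socket lock. A sketch of that variant; report_inq_locked() is an illustrative name.

#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <net/sock.h>

/* Sketch only: SIOCINQ under sk_receive_queue.lock rather than lock_sock(). */
static int report_inq_locked(struct sock *sk, int __user *argp)
{
	struct sk_buff *skb;
	int amount;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	skb = skb_peek(&sk->sk_receive_queue);
	amount = skb ? skb->len : 0;
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	return put_user(amount, argp);
}
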
/Linux-v5.4/net/llc/

af_llc.c
     683  if (skb_queue_empty(&sk->sk_receive_queue)) {  in llc_ui_accept()
     690  skb = skb_dequeue(&sk->sk_receive_queue);  in llc_ui_accept()
     776  skb = skb_peek(&sk->sk_receive_queue);  in llc_ui_recvmsg()
     860  skb_unlink(skb, &sk->sk_receive_queue);  in llc_ui_recvmsg()
     882  skb_unlink(skb, &sk->sk_receive_queue);  in llc_ui_recvmsg()

/Linux-v5.4/net/packet/

af_packet.c
     642  spin_lock(&po->sk.sk_receive_queue.lock);  in prb_retire_rx_blk_timer_expired()
     705  spin_unlock(&po->sk.sk_receive_queue.lock);  in prb_retire_rx_blk_timer_expired()
    2126  spin_lock(&sk->sk_receive_queue.lock);  in packet_rcv()
    2129  __skb_queue_tail(&sk->sk_receive_queue, skb);  in packet_rcv()
    2130  spin_unlock(&sk->sk_receive_queue.lock);  in packet_rcv()
    2264  spin_lock(&sk->sk_receive_queue.lock);  in tpacket_rcv()
    2290  __skb_queue_tail(&sk->sk_receive_queue, copy_skb);  in tpacket_rcv()
    2292  spin_unlock(&sk->sk_receive_queue.lock);  in tpacket_rcv()
    2393  spin_unlock(&sk->sk_receive_queue.lock);  in tpacket_rcv()
    3055  skb_queue_purge(&sk->sk_receive_queue);  in packet_release()
          [all …]

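packet_rcv() and tpacket_rcv() take sk_receive_queue.lock explicitly instead of calling skb_queue_tail(), because per-socket statistics are updated under that same lock before the buffer is appended. A reduced sketch of that shape; the rx_packets counter is a stand-in for the real statistics, and the plain spin_lock() assumes BH (packet receive) context, as in af_packet.c.

#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch only: enqueue while updating state protected by the queue lock. */
static void enqueue_with_stats(struct sock *sk, struct sk_buff *skb,
			       unsigned long *rx_packets)
{
	spin_lock(&sk->sk_receive_queue.lock);
	(*rx_packets)++;				/* bookkeeping under the same lock */
	__skb_queue_tail(&sk->sk_receive_queue, skb);	/* lock already held */
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk);
}
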
/Linux-v5.4/net/dccp/

proto.c
     284  __skb_queue_purge(&sk->sk_receive_queue);  in dccp_disconnect()
     382  skb = skb_peek(&sk->sk_receive_queue);  in dccp_ioctl()
     839  struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);  in dccp_recvmsg()
    1023  while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {  in dccp_close()

/Linux-v5.4/net/ipv4/

tcp_bpf.c
      39  !skb_queue_empty(&sk->sk_receive_queue), &wait);  in tcp_bpf_wait_data()
     126  if (!skb_queue_empty(&sk->sk_receive_queue))  in tcp_bpf_recvmsg()
     142  if (skb_queue_empty(&sk->sk_receive_queue))  in tcp_bpf_recvmsg()

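tcp_bpf_wait_data() blocks until either the psock ingress list or sk_receive_queue has something to read. A trimmed sketch of that wait keeping only the receive-queue condition; wait_for_rx_data() is an illustrative name, and the SOCKWQ_ASYNC_WAITDATA handling follows the pattern used throughout net/.

#include <linux/skbuff.h>
#include <linux/wait.h>
#include <net/sock.h>

/* Sketch only: sleep until the receive queue is non-empty or @timeo expires.
 * Returns non-zero if the condition became true.
 */
static int wait_for_rx_data(struct sock *sk, long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret;

	if (!timeo)
		return 0;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	ret = sk_wait_event(sk, &timeo,
			    !skb_queue_empty(&sk->sk_receive_queue), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);

	return ret;
}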