Lines Matching refs:sk
82 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
87 void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
523 void (*sk_state_change)(struct sock *sk);
524 void (*sk_data_ready)(struct sock *sk);
525 void (*sk_write_space)(struct sock *sk);
526 void (*sk_error_report)(struct sock *sk);
527 int (*sk_backlog_rcv)(struct sock *sk,
530 struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
534 void (*sk_destruct)(struct sock *sk);
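
The four wakeup callbacks above (lines 523-526) are the socket's notification hooks, and kernel consumers commonly interpose on them. A minimal sketch of that pattern under sk_callback_lock, assuming a kernel-module context; my_data_ready and my_saved_data_ready are hypothetical names, and real code would store the saved pointer per-socket rather than in a global:

#include <net/sock.h>

static void (*my_saved_data_ready)(struct sock *sk);

static void my_data_ready(struct sock *sk)
{
	/* module-specific wakeup work would go here, then chain to the original */
	my_saved_data_ready(sk);
}

static void my_install_hook(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);	/* callbacks are changed under this lock */
	my_saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = my_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}
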
575 static inline bool sk_user_data_is_nocopy(const struct sock *sk)
577 return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY);
580 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
593 __locked_read_sk_user_data_with_flags(const struct sock *sk,
597 (uintptr_t)rcu_dereference_check(__sk_user_data(sk),
598 lockdep_is_held(&sk->sk_callback_lock));
616 __rcu_dereference_sk_user_data_with_flags(const struct sock *sk,
619 uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk));
628 #define rcu_dereference_sk_user_data(sk) \
629 __rcu_dereference_sk_user_data_with_flags(sk, 0)
630 #define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \
636 rcu_assign_pointer(__sk_user_data((sk)), \
639 #define rcu_assign_sk_user_data(sk, ptr) \
640 __rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
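
sk_user_data is an RCU-protected pointer: writers publish through rcu_assign_sk_user_data(), typically under sk_callback_lock, and readers dereference inside an RCU read-side section. A hedged sketch; struct my_state, my_attach() and my_lookup() are hypothetical:

#include <net/sock.h>

struct my_state { int refs; };	/* hypothetical per-socket state */

static void my_attach(struct sock *sk, struct my_state *st)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_sk_user_data(sk, st);	/* publish with a write barrier */
	write_unlock_bh(&sk->sk_callback_lock);
}

static struct my_state *my_lookup(struct sock *sk)
{
	/* caller must be inside rcu_read_lock() */
	return rcu_dereference_sk_user_data(sk);
}
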
643 struct net *sock_net(const struct sock *sk)
645 return read_pnet(&sk->sk_net);
649 void sock_net_set(struct sock *sk, struct net *net)
651 write_pnet(&sk->sk_net, net);
665 int sk_set_peek_off(struct sock *sk, int val);
667 static inline int sk_peek_offset(const struct sock *sk, int flags)
670 return READ_ONCE(sk->sk_peek_off);
676 static inline void sk_peek_offset_bwd(struct sock *sk, int val)
678 s32 off = READ_ONCE(sk->sk_peek_off);
682 WRITE_ONCE(sk->sk_peek_off, off);
686 static inline void sk_peek_offset_fwd(struct sock *sk, int val)
688 sk_peek_offset_bwd(sk, -val);
719 static inline struct sock *sk_next(const struct sock *sk)
721 return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
724 static inline struct sock *sk_nulls_next(const struct sock *sk)
726 return (!is_a_nulls(sk->sk_nulls_node.next)) ?
727 hlist_nulls_entry(sk->sk_nulls_node.next,
732 static inline bool sk_unhashed(const struct sock *sk)
734 return hlist_unhashed(&sk->sk_node);
737 static inline bool sk_hashed(const struct sock *sk)
739 return !sk_unhashed(sk);
747 static inline void __sk_del_node(struct sock *sk)
749 __hlist_del(&sk->sk_node);
753 static inline bool __sk_del_node_init(struct sock *sk)
755 if (sk_hashed(sk)) {
756 __sk_del_node(sk);
757 sk_node_init(&sk->sk_node);
769 static __always_inline void sock_hold(struct sock *sk)
771 refcount_inc(&sk->sk_refcnt);
777 static __always_inline void __sock_put(struct sock *sk)
779 refcount_dec(&sk->sk_refcnt);
782 static inline bool sk_del_node_init(struct sock *sk)
784 bool rc = __sk_del_node_init(sk);
788 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
789 __sock_put(sk);
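
sock_hold() pairs with sock_put() (line 1984 below): the former is legal only while the caller already holds a reference, and the latter frees the socket when the count drops to zero. A minimal sketch of the pairing; my_grab() and my_release() are hypothetical:

#include <net/sock.h>

static struct sock *my_grab(struct sock *sk)
{
	sock_hold(sk);	/* legal only when sk_refcnt is already non-zero */
	return sk;
}

static void my_release(struct sock *sk)
{
	sock_put(sk);	/* drops the reference; frees the socket at zero */
}
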
793 #define sk_del_node_init_rcu(sk) sk_del_node_init(sk)
795 static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
797 if (sk_hashed(sk)) {
798 hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
804 static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
806 bool rc = __sk_nulls_del_node_init_rcu(sk);
810 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
811 __sock_put(sk);
816 static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
818 hlist_add_head(&sk->sk_node, list);
821 static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
823 sock_hold(sk);
824 __sk_add_node(sk, list);
827 static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
829 sock_hold(sk);
830 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
831 sk->sk_family == AF_INET6)
832 hlist_add_tail_rcu(&sk->sk_node, list);
834 hlist_add_head_rcu(&sk->sk_node, list);
837 static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
839 sock_hold(sk);
840 hlist_add_tail_rcu(&sk->sk_node, list);
843 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
845 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
848 static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
850 hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
853 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
855 sock_hold(sk);
856 __sk_nulls_add_node_rcu(sk, list);
859 static inline void __sk_del_bind_node(struct sock *sk)
861 __hlist_del(&sk->sk_bind_node);
864 static inline void sk_add_bind_node(struct sock *sk,
867 hlist_add_head(&sk->sk_bind_node, list);
870 static inline void __sk_del_bind2_node(struct sock *sk)
872 __hlist_del(&sk->sk_bind2_node);
875 static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list)
877 hlist_add_head(&sk->sk_bind2_node, list);
914 static inline struct user_namespace *sk_user_ns(const struct sock *sk)
920 return sk->sk_socket->file->f_cred->user_ns;
965 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
967 __set_bit(flag, &sk->sk_flags);
970 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
972 __clear_bit(flag, &sk->sk_flags);
975 static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit,
979 sock_set_flag(sk, bit);
981 sock_reset_flag(sk, bit);
984 static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
986 return test_bit(flag, &sk->sk_flags);
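
Note that sock_set_flag()/sock_reset_flag() use the non-atomic __set_bit()/__clear_bit(), so writers serialize through the socket lock; only the sock_flag() test is safe locklessly. A short sketch; my_set_debug() is a hypothetical caller:

#include <net/sock.h>

static void my_set_debug(struct sock *sk, bool on)
{
	lock_sock(sk);				/* serializes the non-atomic bit ops */
	sock_valbool_flag(sk, SOCK_DBG, on);
	release_sock(sk);
}
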
1008 static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
1010 return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
1013 static inline void sk_acceptq_removed(struct sock *sk)
1015 WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
1018 static inline void sk_acceptq_added(struct sock *sk)
1020 WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
1027 static inline bool sk_acceptq_is_full(const struct sock *sk)
1029 return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
1035 static inline int sk_stream_min_wspace(const struct sock *sk)
1037 return READ_ONCE(sk->sk_wmem_queued) >> 1;
1040 static inline int sk_stream_wspace(const struct sock *sk)
1042 return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
1045 static inline void sk_wmem_queued_add(struct sock *sk, int val)
1047 WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
1050 void sk_stream_write_space(struct sock *sk);
1053 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
1058 if (!sk->sk_backlog.tail)
1059 WRITE_ONCE(sk->sk_backlog.head, skb);
1061 sk->sk_backlog.tail->next = skb;
1063 WRITE_ONCE(sk->sk_backlog.tail, skb);
1072 static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
1074 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
1080 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
1083 if (sk_rcvqueues_full(sk, limit))
1091 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
1094 __sk_add_backlog(sk, skb);
1095 sk->sk_backlog.len += skb->truesize;
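
Together these implement the softirq-side receive pattern: process the packet directly unless a process context owns the socket, otherwise queue it to the backlog, bounded by the receive buffer. A sketch modeled on tcp_v4_rcv(); my_rcv() and my_do_rcv() are hypothetical names:

#include <net/sock.h>

static int my_do_rcv(struct sock *sk, struct sk_buff *skb);	/* hypothetical direct path */

static int my_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret = 0;

	bh_lock_sock_nested(sk);
	if (!sock_owned_by_user(sk)) {
		ret = my_do_rcv(sk, skb);
	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
		kfree_skb(skb);		/* backlog over limit: drop */
		ret = -ENOBUFS;
	}
	bh_unlock_sock(sk);
	return ret;
}
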
1099 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
1101 INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb));
1102 INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb));
1104 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1107 return __sk_backlog_rcv(sk, skb);
1109 return INDIRECT_CALL_INET(sk->sk_backlog_rcv,
1112 sk, skb);
1115 static inline void sk_incoming_cpu_update(struct sock *sk)
1119 if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
1120 WRITE_ONCE(sk->sk_incoming_cpu, cpu);
1135 static inline void sock_rps_record_flow(const struct sock *sk)
1149 if (sk->sk_state == TCP_ESTABLISHED)
1150 sock_rps_record_flow_hash(sk->sk_rxhash);
1155 static inline void sock_rps_save_rxhash(struct sock *sk,
1159 if (unlikely(sk->sk_rxhash != skb->hash))
1160 sk->sk_rxhash = skb->hash;
1164 static inline void sock_rps_reset_rxhash(struct sock *sk)
1167 sk->sk_rxhash = 0;
1186 int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
1187 int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
1188 void sk_stream_wait_close(struct sock *sk, long timeo_p);
1189 int sk_stream_error(struct sock *sk, int flags, int err);
1190 void sk_stream_kill_queues(struct sock *sk);
1191 void sk_set_memalloc(struct sock *sk);
1192 void sk_clear_memalloc(struct sock *sk);
1194 void __sk_flush_backlog(struct sock *sk);
1196 static inline bool sk_flush_backlog(struct sock *sk)
1198 if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
1199 __sk_flush_backlog(sk);
1205 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
1219 static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1222 memset(sk, 0, offsetof(struct sock, sk_node.next));
1223 memset(&sk->sk_node.pprev, 0,
1231 void (*close)(struct sock *sk,
1233 int (*pre_connect)(struct sock *sk,
1236 int (*connect)(struct sock *sk,
1239 int (*disconnect)(struct sock *sk, int flags);
1241 struct sock * (*accept)(struct sock *sk, int flags, int *err,
1244 int (*ioctl)(struct sock *sk, int cmd,
1246 int (*init)(struct sock *sk);
1247 void (*destroy)(struct sock *sk);
1248 void (*shutdown)(struct sock *sk, int how);
1249 int (*setsockopt)(struct sock *sk, int level,
1252 int (*getsockopt)(struct sock *sk, int level,
1255 void (*keepalive)(struct sock *sk, int valbool);
1257 int (*compat_ioctl)(struct sock *sk,
1260 int (*sendmsg)(struct sock *sk, struct msghdr *msg,
1262 int (*recvmsg)(struct sock *sk, struct msghdr *msg,
1264 int (*sendpage)(struct sock *sk, struct page *page,
1266 int (*bind)(struct sock *sk,
1268 int (*bind_add)(struct sock *sk,
1271 int (*backlog_rcv) (struct sock *sk,
1276 void (*release_cb)(struct sock *sk);
1279 int (*hash)(struct sock *sk);
1280 void (*unhash)(struct sock *sk);
1281 void (*rehash)(struct sock *sk);
1282 int (*get_port)(struct sock *sk, unsigned short snum);
1283 void (*put_port)(struct sock *sk);
1285 int (*psock_update_sk_prot)(struct sock *sk,
1296 int (*forward_alloc_get)(const struct sock *sk);
1299 bool (*stream_memory_free)(const struct sock *sk, int wake);
1300 bool (*sock_is_readable)(struct sock *sk);
1302 void (*enter_memory_pressure)(struct sock *sk);
1303 void (*leave_memory_pressure)(struct sock *sk);
1351 int (*diag_destroy)(struct sock *sk, int err);
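
A protocol implements the ops table above in a struct proto and registers it with proto_register(). A hedged, minimal sketch wiring only a couple of the ops; "MYPROTO", struct my_sock and the my_* handlers are hypothetical:

#include <linux/module.h>
#include <net/sock.h>

struct my_sock { struct sock sk; };	/* hypothetical sock subtype */

static int my_sk_init(struct sock *sk);
static void my_close(struct sock *sk, long timeout);

static struct proto my_proto = {
	.name		= "MYPROTO",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct my_sock),
	.init		= my_sk_init,
	.close		= my_close,
};

static int __init my_proto_init(void)
{
	return proto_register(&my_proto, 1);	/* 1 = allocate a slab for my_sock */
}
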
1359 static inline void sk_refcnt_debug_inc(struct sock *sk)
1361 atomic_inc(&sk->sk_prot->socks);
1364 static inline void sk_refcnt_debug_dec(struct sock *sk)
1366 atomic_dec(&sk->sk_prot->socks);
1368 sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
1371 static inline void sk_refcnt_debug_release(const struct sock *sk)
1373 if (refcount_read(&sk->sk_refcnt) != 1)
1375 sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
1378 #define sk_refcnt_debug_inc(sk) do { } while (0)
1379 #define sk_refcnt_debug_dec(sk) do { } while (0)
1380 #define sk_refcnt_debug_release(sk) do { } while (0)
1383 INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
1385 static inline int sk_forward_alloc_get(const struct sock *sk)
1388 if (sk->sk_prot->forward_alloc_get)
1389 return sk->sk_prot->forward_alloc_get(sk);
1391 return sk->sk_forward_alloc;
1394 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
1396 if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
1399 return sk->sk_prot->stream_memory_free ?
1400 INDIRECT_CALL_INET_1(sk->sk_prot->stream_memory_free,
1401 tcp_stream_memory_free, sk, wake) : true;
1404 static inline bool sk_stream_memory_free(const struct sock *sk)
1406 return __sk_stream_memory_free(sk, 0);
1409 static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
1411 return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
1412 __sk_stream_memory_free(sk, wake);
1415 static inline bool sk_stream_is_writeable(const struct sock *sk)
1417 return __sk_stream_is_writeable(sk, 0);
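
sk_stream_is_writeable() combines the two checks above: free space of at least half the queued bytes, and forward allocation available. It is the kind of test a stream protocol's poll handler consults before reporting write readiness. A sketch; my_poll_mask() is hypothetical:

#include <linux/poll.h>
#include <net/sock.h>

static __poll_t my_poll_mask(struct sock *sk)
{
	__poll_t mask = 0;

	if (sk_stream_is_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM;	/* write space available */
	return mask;
}
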
1420 static inline int sk_under_cgroup_hierarchy(struct sock *sk,
1424 return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
1431 static inline bool sk_has_memory_pressure(const struct sock *sk)
1433 return sk->sk_prot->memory_pressure != NULL;
1436 static inline bool sk_under_memory_pressure(const struct sock *sk)
1438 if (!sk->sk_prot->memory_pressure)
1441 if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
1442 mem_cgroup_under_socket_pressure(sk->sk_memcg))
1445 return !!*sk->sk_prot->memory_pressure;
1455 sk_memory_allocated(const struct sock *sk)
1457 return proto_memory_allocated(sk->sk_prot);
1464 sk_memory_allocated_add(struct sock *sk, int amt)
1469 local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
1471 __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
1472 atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
1478 sk_memory_allocated_sub(struct sock *sk, int amt)
1483 local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
1485 __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
1486 atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
1493 static inline void sk_sockets_allocated_dec(struct sock *sk)
1495 percpu_counter_add_batch(sk->sk_prot->sockets_allocated, -1,
1499 static inline void sk_sockets_allocated_inc(struct sock *sk)
1501 percpu_counter_add_batch(sk->sk_prot->sockets_allocated, 1,
1506 sk_sockets_allocated_read_positive(struct sock *sk)
1508 return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
1561 static inline int __sk_prot_rehash(struct sock *sk)
1563 sk->sk_prot->unhash(sk);
1564 return sk->sk_prot->hash(sk);
1598 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
1599 int __sk_mem_schedule(struct sock *sk, int size, int kind);
1600 void __sk_mem_reduce_allocated(struct sock *sk, int amount);
1601 void __sk_mem_reclaim(struct sock *sk, int amount);
1607 static inline long sk_prot_mem_limits(const struct sock *sk, int index)
1609 return READ_ONCE(sk->sk_prot->sysctl_mem[index]);
1617 static inline bool sk_has_account(struct sock *sk)
1620 return !!sk->sk_prot->memory_allocated;
1623 static inline bool sk_wmem_schedule(struct sock *sk, int size)
1627 if (!sk_has_account(sk))
1629 delta = size - sk->sk_forward_alloc;
1630 return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_SEND);
1634 sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
1638 if (!sk_has_account(sk))
1640 delta = size - sk->sk_forward_alloc;
1641 return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) ||
1645 static inline int sk_unused_reserved_mem(const struct sock *sk)
1649 if (likely(!sk->sk_reserved_mem))
1652 unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
1653 atomic_read(&sk->sk_rmem_alloc);
1658 static inline void sk_mem_reclaim(struct sock *sk)
1662 if (!sk_has_account(sk))
1665 reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
1668 __sk_mem_reclaim(sk, reclaimable);
1671 static inline void sk_mem_reclaim_final(struct sock *sk)
1673 sk->sk_reserved_mem = 0;
1674 sk_mem_reclaim(sk);
1677 static inline void sk_mem_charge(struct sock *sk, int size)
1679 if (!sk_has_account(sk))
1681 sk->sk_forward_alloc -= size;
1684 static inline void sk_mem_uncharge(struct sock *sk, int size)
1686 if (!sk_has_account(sk))
1688 sk->sk_forward_alloc += size;
1689 sk_mem_reclaim(sk);
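
The usual transmit-side sequence is schedule-then-charge: reserve forward allocation for the skb's truesize, then charge it and account the queued bytes. A sketch of that pattern under stated assumptions; my_queue_skb() is hypothetical:

#include <net/sock.h>

static int my_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_wmem_schedule(sk, skb->truesize))
		return -ENOMEM;			/* could not extend forward_alloc */

	sk_mem_charge(sk, skb->truesize);	/* consume forward allocation */
	sk_wmem_queued_add(sk, skb->truesize);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	return 0;
}
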
1699 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
1701 sk->sk_lock.owned = 0; \
1702 init_waitqueue_head(&sk->sk_lock.wq); \
1703 spin_lock_init(&(sk)->sk_lock.slock); \
1704 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
1705 sizeof((sk)->sk_lock)); \
1706 lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
1708 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
1711 static inline bool lockdep_sock_is_held(const struct sock *sk)
1713 return lockdep_is_held(&sk->sk_lock) ||
1714 lockdep_is_held(&sk->sk_lock.slock);
1717 void lock_sock_nested(struct sock *sk, int subclass);
1719 static inline void lock_sock(struct sock *sk)
1721 lock_sock_nested(sk, 0);
1724 void __lock_sock(struct sock *sk);
1725 void __release_sock(struct sock *sk);
1726 void release_sock(struct sock *sk);
1735 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
1750 static inline bool lock_sock_fast(struct sock *sk)
1753 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
1755 return __lock_sock_fast(sk);
1759 static inline bool lock_sock_fast_nested(struct sock *sk)
1761 mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
1763 return __lock_sock_fast(sk);
1774 static inline void unlock_sock_fast(struct sock *sk, bool slow)
1775 __releases(&sk->sk_lock.slock)
1778 release_sock(sk);
1779 __release(&sk->sk_lock.slock);
1781 mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
1782 spin_unlock_bh(&sk->sk_lock.slock);
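
Process-context code takes the full socket lock with lock_sock()/release_sock(), which also runs the backlog on release; short, non-sleeping sections can use the fast variant, passing its return value back to unlock_sock_fast(). A sketch; my_update() is hypothetical:

#include <net/sock.h>

static void my_update(struct sock *sk)
{
	bool slow;

	lock_sock(sk);			/* may sleep; owns the socket afterwards */
	/* ... sleepable work; backlog is processed at release ... */
	release_sock(sk);

	slow = lock_sock_fast(sk);	/* spinlock-only unless the lock is owned */
	/* ... short, non-sleeping critical section ... */
	unlock_sock_fast(sk, slow);
}
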
1786 void sockopt_lock_sock(struct sock *sk);
1787 void sockopt_release_sock(struct sock *sk);
1805 static inline void sock_owned_by_me(const struct sock *sk)
1808 WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
1812 static inline bool sock_owned_by_user(const struct sock *sk)
1814 sock_owned_by_me(sk);
1815 return sk->sk_lock.owned;
1818 static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
1820 return sk->sk_lock.owned;
1823 static inline void sock_release_ownership(struct sock *sk)
1825 if (sock_owned_by_user_nocheck(sk)) {
1826 sk->sk_lock.owned = 0;
1829 mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
1836 struct sock *sk = (struct sock *)csk;
1838 return !sock_owned_by_user_nocheck(sk) &&
1839 !spin_is_locked(&sk->sk_lock.slock);
1844 void sk_free(struct sock *sk);
1845 void sk_destruct(struct sock *sk);
1846 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1847 void sk_free_unlock_clone(struct sock *sk);
1849 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1853 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
1865 int sk_setsockopt(struct sock *sk, int level, int optname,
1870 int sk_getsockopt(struct sock *sk, int level, int optname,
1876 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1880 static inline struct sk_buff *sock_alloc_send_skb(struct sock *sk,
1884 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
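
sock_alloc_send_skb() allocates a wmem-charged skb, sleeping up to the socket's send timeout unless noblock is set, and reports failure through errcode. A sketch; my_alloc_skb() is hypothetical:

#include <linux/err.h>
#include <net/sock.h>

static struct sk_buff *my_alloc_skb(struct sock *sk, unsigned long len, int noblock)
{
	struct sk_buff *skb;
	int err;

	skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		return ERR_PTR(err);	/* err is negative on failure */
	return skb;
}
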
1887 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
1888 void sock_kfree_s(struct sock *sk, void *mem, int size);
1889 void sock_kzfree_s(struct sock *sk, void *mem, int size);
1890 void sk_send_sigurg(struct sock *sk);
1892 static inline void sock_replace_proto(struct sock *sk, struct proto *proto)
1894 if (sk->sk_socket)
1895 clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
1896 WRITE_ONCE(sk->sk_prot, proto);
1906 const struct sock *sk)
1908 *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
1911 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
1913 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1929 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
1935 ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
1949 void sk_common_release(struct sock *sk);
1956 void sock_init_data(struct socket *sock, struct sock *sk);
1984 static inline void sock_put(struct sock *sk)
1986 if (refcount_dec_and_test(&sk->sk_refcnt))
1987 sk_free(sk);
1992 void sock_gen_put(struct sock *sk);
1994 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
1996 static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
1999 return __sk_receive_skb(sk, skb, nested, 1, true);
2002 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
2007 sk->sk_tx_queue_mapping = tx_queue;
2012 static inline void sk_tx_queue_clear(struct sock *sk)
2014 sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
2017 static inline int sk_tx_queue_get(const struct sock *sk)
2019 if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
2020 return sk->sk_tx_queue_mapping;
2025 static inline void __sk_rx_queue_set(struct sock *sk,
2034 unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
2035 WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
2040 static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
2042 __sk_rx_queue_set(sk, skb, true);
2045 static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb)
2047 __sk_rx_queue_set(sk, skb, false);
2050 static inline void sk_rx_queue_clear(struct sock *sk)
2053 WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING);
2057 static inline int sk_rx_queue_get(const struct sock *sk)
2060 if (sk) {
2061 int res = READ_ONCE(sk->sk_rx_queue_mapping);
2071 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
2073 sk->sk_socket = sock;
2076 static inline wait_queue_head_t *sk_sleep(struct sock *sk)
2079 return &rcu_dereference_raw(sk->sk_wq)->wait;
2088 static inline void sock_orphan(struct sock *sk)
2090 write_lock_bh(&sk->sk_callback_lock);
2091 sock_set_flag(sk, SOCK_DEAD);
2092 sk_set_socket(sk, NULL);
2093 sk->sk_wq = NULL;
2094 write_unlock_bh(&sk->sk_callback_lock);
2097 static inline void sock_graft(struct sock *sk, struct socket *parent)
2099 WARN_ON(parent->sk);
2100 write_lock_bh(&sk->sk_callback_lock);
2101 rcu_assign_pointer(sk->sk_wq, &parent->wq);
2102 parent->sk = sk;
2103 sk_set_socket(sk, parent);
2104 sk->sk_uid = SOCK_INODE(parent)->i_uid;
2105 security_sock_graft(sk, parent);
2106 write_unlock_bh(&sk->sk_callback_lock);
2109 kuid_t sock_i_uid(struct sock *sk);
2110 unsigned long sock_i_ino(struct sock *sk);
2112 static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
2114 return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
2124 static inline void sk_set_txhash(struct sock *sk)
2127 WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
2130 static inline bool sk_rethink_txhash(struct sock *sk)
2132 if (sk->sk_txhash && sk->sk_txrehash == SOCK_TXREHASH_ENABLED) {
2133 sk_set_txhash(sk);
2140 __sk_dst_get(struct sock *sk)
2142 return rcu_dereference_check(sk->sk_dst_cache,
2143 lockdep_sock_is_held(sk));
2147 sk_dst_get(struct sock *sk)
2152 dst = rcu_dereference(sk->sk_dst_cache);
2159 static inline void __dst_negative_advice(struct sock *sk)
2161 struct dst_entry *ndst, *dst = __sk_dst_get(sk);
2167 rcu_assign_pointer(sk->sk_dst_cache, ndst);
2168 sk_tx_queue_clear(sk);
2169 sk->sk_dst_pending_confirm = 0;
2174 static inline void dst_negative_advice(struct sock *sk)
2176 sk_rethink_txhash(sk);
2177 __dst_negative_advice(sk);
2181 __sk_dst_set(struct sock *sk, struct dst_entry *dst)
2185 sk_tx_queue_clear(sk);
2186 sk->sk_dst_pending_confirm = 0;
2187 old_dst = rcu_dereference_protected(sk->sk_dst_cache,
2188 lockdep_sock_is_held(sk));
2189 rcu_assign_pointer(sk->sk_dst_cache, dst);
2194 sk_dst_set(struct sock *sk, struct dst_entry *dst)
2198 sk_tx_queue_clear(sk);
2199 sk->sk_dst_pending_confirm = 0;
2200 old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
2205 __sk_dst_reset(struct sock *sk)
2207 __sk_dst_set(sk, NULL);
2211 sk_dst_reset(struct sock *sk)
2213 sk_dst_set(sk, NULL);
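
sk_dst_get() returns a referenced route that the caller must release, whereas __sk_dst_get() takes no reference and is only valid under the socket lock or RCU. A sketch of the refcounted variant; my_path_mtu() is hypothetical:

#include <net/dst.h>
#include <net/sock.h>

static u32 my_path_mtu(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);	/* takes a reference, or NULL */
	u32 mtu = 0;

	if (dst) {
		mtu = dst_mtu(dst);
		dst_release(dst);	/* drop the reference from sk_dst_get() */
	}
	return mtu;
}
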
2216 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
2218 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
2220 static inline void sk_dst_confirm(struct sock *sk)
2222 if (!READ_ONCE(sk->sk_dst_pending_confirm))
2223 WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
2229 struct sock *sk = skb->sk;
2231 if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
2232 WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
2237 bool sk_mc_loop(struct sock *sk);
2239 static inline bool sk_can_gso(const struct sock *sk)
2241 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
2244 void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
2246 static inline void sk_gso_disable(struct sock *sk)
2248 sk->sk_gso_disabled = 1;
2249 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2252 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
2261 } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
2270 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
2275 err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
2283 static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
2290 err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
2296 sk_wmem_queued_add(sk, copy);
2297 sk_mem_charge(sk, copy);
2307 static inline int sk_wmem_alloc_get(const struct sock *sk)
2309 return refcount_read(&sk->sk_wmem_alloc) - 1;
2318 static inline int sk_rmem_alloc_get(const struct sock *sk)
2320 return atomic_read(&sk->sk_rmem_alloc);
2329 static inline bool sk_has_allocations(const struct sock *sk)
2331 return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
2392 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
2395 u32 txhash = READ_ONCE(sk->sk_txhash);
2403 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
2413 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
2416 skb->sk = sk;
2418 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2419 sk_mem_charge(sk, skb->truesize);
2422 static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
2424 if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
2427 skb->sk = sk;
2442 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
2445 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
2447 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
2449 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
2451 void (*destructor)(struct sock *sk,
2453 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2455 int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
2458 static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
2460 return sock_queue_rcv_skb_reason(sk, skb, NULL);
2463 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
2464 struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
2470 static inline int sock_error(struct sock *sk)
2477 if (likely(data_race(!sk->sk_err)))
2480 err = xchg(&sk->sk_err, 0);
2484 void sk_error_report(struct sock *sk);
2486 static inline unsigned long sock_wspace(struct sock *sk)
2490 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
2491 amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
2502 static inline void sk_set_bit(int nr, struct sock *sk)
2505 !sock_flag(sk, SOCK_FASYNC))
2508 set_bit(nr, &sk->sk_wq_raw->flags);
2511 static inline void sk_clear_bit(int nr, struct sock *sk)
2514 !sock_flag(sk, SOCK_FASYNC))
2517 clear_bit(nr, &sk->sk_wq_raw->flags);
2520 static inline void sk_wake_async(const struct sock *sk, int how, int band)
2522 if (sock_flag(sk, SOCK_FASYNC)) {
2524 sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
2539 static inline void sk_stream_moderate_sndbuf(struct sock *sk)
2543 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
2546 val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
2547 val = max_t(u32, val, sk_unused_reserved_mem(sk));
2549 WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
2569 static inline struct page_frag *sk_page_frag(struct sock *sk)
2571 if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
2575 return &sk->sk_frag;
2578 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
2583 static inline bool sock_writeable(const struct sock *sk)
2585 return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
2598 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
2600 return noblock ? 0 : sk->sk_rcvtimeo;
2603 static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
2605 return noblock ? 0 : sk->sk_sndtimeo;
2608 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
2610 int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
2641 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
2643 SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
2644 atomic_read(&sk->sk_drops) : 0;
2647 static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
2651 atomic_add(segs, &sk->sk_drops);
2654 static inline ktime_t sock_read_timestamp(struct sock *sk)
2661 seq = read_seqbegin(&sk->sk_stamp_seq);
2662 kt = sk->sk_stamp;
2663 } while (read_seqretry(&sk->sk_stamp_seq, seq));
2667 return READ_ONCE(sk->sk_stamp);
2671 static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
2674 write_seqlock(&sk->sk_stamp_seq);
2675 sk->sk_stamp = kt;
2676 write_sequnlock(&sk->sk_stamp_seq);
2678 WRITE_ONCE(sk->sk_stamp, kt);
2682 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
2684 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
2688 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
2699 if (sock_flag(sk, SOCK_RCVTSTAMP) ||
2700 (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
2701 (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
2703 (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
2704 __sock_recv_timestamp(msg, sk, skb);
2706 sock_write_timestamp(sk, kt);
2708 if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
2709 __sock_recv_wifi_status(msg, sk, skb);
2712 void __sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
2716 static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
2725 if (sk->sk_flags & FLAGS_RECV_CMSGS || sk->sk_tsflags & TSFLAGS_ANY)
2726 __sock_recv_cmsgs(msg, sk, skb);
2727 else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
2728 sock_write_timestamp(sk, skb->tstamp);
2729 else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
2730 sock_write_timestamp(sk, 0);
2744 static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
2751 *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
2753 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
2757 static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
2760 _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
2765 _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
2769 static inline bool sk_is_tcp(const struct sock *sk)
2771 return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
2782 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
2784 __skb_unlink(skb, &sk->sk_receive_queue);
2801 static inline bool sk_fullsock(const struct sock *sk)
2803 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
2807 sk_is_refcounted(struct sock *sk)
2810 return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
2821 if (skb->sk) {
2822 struct sock *sk = skb->sk;
2826 *refcounted = sk_is_refcounted(sk);
2828 skb->sk = NULL;
2829 return sk;
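
skb_steal_sock() hands the caller the socket a lower layer attached to the skb, reporting through *refcounted whether that socket carries a reference to drop (cf. full vs. RCU-freed sockets above). A demux-style sketch; my_demux() is hypothetical:

#include <net/sock.h>

static void my_demux(struct sk_buff *skb)
{
	bool refcounted;
	struct sock *sk = skb_steal_sock(skb, &refcounted);

	if (!sk)
		return;
	/* ... deliver skb to sk ... */
	if (refcounted)
		sock_put(sk);	/* only full, refcounted sockets hold a reference */
}
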
2843 struct sock *sk = skb->sk;
2845 if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
2846 skb = sk->sk_validate_xmit_skb(sk, dev, skb);
2862 static inline bool sk_listener(const struct sock *sk)
2864 return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
2867 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
2868 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
2871 bool sk_ns_capable(const struct sock *sk,
2873 bool sk_capable(const struct sock *sk, int cap);
2874 bool sk_net_capable(const struct sock *sk, int cap);
2876 void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
2900 static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
2904 return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset));
2909 static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
2913 return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset));
2922 static inline void sk_pacing_shift_update(struct sock *sk, int val)
2924 if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
2926 WRITE_ONCE(sk->sk_pacing_shift, val);
2934 static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
2936 int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
2942 mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
2949 void sock_def_readable(struct sock *sk);
2951 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
2952 void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
2953 int sock_set_timestamping(struct sock *sk, int optname,
2956 void sock_enable_timestamps(struct sock *sk);
2957 void sock_no_linger(struct sock *sk);
2958 void sock_set_keepalive(struct sock *sk);
2959 void sock_set_priority(struct sock *sk, u32 priority);
2960 void sock_set_rcvbuf(struct sock *sk, int val);
2961 void sock_set_mark(struct sock *sk, u32 val);
2962 void sock_set_reuseaddr(struct sock *sk);
2963 void sock_set_reuseport(struct sock *sk);
2964 void sock_set_sndtimeo(struct sock *sk, s64 secs);
2966 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
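
These sock_set_*() helpers exist so in-kernel users can configure sockets without going through setsockopt(). A sketch that creates a kernel TCP socket and applies a few of them; my_open_tcp() is hypothetical:

#include <linux/err.h>
#include <linux/net.h>
#include <net/sock.h>

static struct socket *my_open_tcp(struct net *net)
{
	struct socket *sock;
	int err;

	err = sock_create_kern(net, AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err)
		return ERR_PTR(err);

	sock_set_reuseaddr(sock->sk);
	sock_set_keepalive(sock->sk);
	sock_set_sndtimeo(sock->sk, 5);	/* seconds */
	return sock;
}
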
2972 static inline bool sk_is_readable(struct sock *sk)
2974 if (sk->sk_prot->sock_is_readable)
2975 return sk->sk_prot->sock_is_readable(sk);