Lines matching refs: sk
Cross-reference hits for the socket pointer "sk" in the Linux kernel's include/net/sock.h. Each entry shows the header's own line number, the matched source line, and the indexer's tag (argument, local, or the enclosing function).

82 #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \  argument
87 void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) in SOCK_DEBUG() argument
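The first form prints at KERN_DEBUG only when the socket has SOCK_DBG set (toggled by SO_DEBUG); the second is the no-op stub used when SOCK_DEBUGGING is not defined. A minimal usage sketch, with "seq" a hypothetical local:

    SOCK_DEBUG(sk, "%s: advancing rcv_nxt to %u\n", __func__, seq);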
529 void (*sk_state_change)(struct sock *sk);
530 void (*sk_data_ready)(struct sock *sk);
531 void (*sk_write_space)(struct sock *sk);
532 void (*sk_error_report)(struct sock *sk);
533 int (*sk_backlog_rcv)(struct sock *sk,
536 struct sk_buff* (*sk_validate_xmit_skb)(struct sock *sk,
540 void (*sk_destruct)(struct sock *sk);
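These callback pointers let in-kernel users hook socket events. A common pattern is to save and chain the original handler under sk_callback_lock; a sketch under that assumption, with my_data_ready and my_saved_data_ready as hypothetical names (a single saved pointer serves one socket only; real users stash it per connection):

    static void (*my_saved_data_ready)(struct sock *sk);

    static void my_data_ready(struct sock *sk)
    {
            /* consume data or kick a worker, then chain to the original */
            my_saved_data_ready(sk);
    }

    static void my_install(struct sock *sk)
    {
            write_lock_bh(&sk->sk_callback_lock);
            my_saved_data_ready = sk->sk_data_ready;
            sk->sk_data_ready = my_data_ready;
            write_unlock_bh(&sk->sk_callback_lock);
    }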
581 static inline bool sk_user_data_is_nocopy(const struct sock *sk) in sk_user_data_is_nocopy() argument
583 return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY); in sk_user_data_is_nocopy()
586 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) argument
599 __locked_read_sk_user_data_with_flags(const struct sock *sk, in __locked_read_sk_user_data_with_flags() argument
603 (uintptr_t)rcu_dereference_check(__sk_user_data(sk), in __locked_read_sk_user_data_with_flags()
604 lockdep_is_held(&sk->sk_callback_lock)); in __locked_read_sk_user_data_with_flags()
622 __rcu_dereference_sk_user_data_with_flags(const struct sock *sk, in __rcu_dereference_sk_user_data_with_flags() argument
625 uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk)); in __rcu_dereference_sk_user_data_with_flags()
634 #define rcu_dereference_sk_user_data(sk) \ argument
635 __rcu_dereference_sk_user_data_with_flags(sk, 0)
636 #define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \ argument
642 rcu_assign_pointer(__sk_user_data((sk)), \
645 #define rcu_assign_sk_user_data(sk, ptr) \ argument
646 __rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
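The __rcu accessors above guard sk_user_data, which kernel subsystems use to attach private per-socket state. A sketch of both sides, with struct my_priv and do_something() hypothetical:

    struct my_priv *priv;

    /* reader side, under RCU */
    rcu_read_lock();
    priv = rcu_dereference_sk_user_data(sk);
    if (priv)
            do_something(priv);
    rcu_read_unlock();

    /* writer side, typically with sk_callback_lock held */
    rcu_assign_sk_user_data(sk, priv);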
649 struct net *sock_net(const struct sock *sk) in sock_net() argument
651 return read_pnet(&sk->sk_net); in sock_net()
655 void sock_net_set(struct sock *sk, struct net *net) in sock_net_set() argument
657 write_pnet(&sk->sk_net, net); in sock_net_set()
671 int sk_set_peek_off(struct sock *sk, int val);
673 static inline int sk_peek_offset(const struct sock *sk, int flags) in sk_peek_offset() argument
676 return READ_ONCE(sk->sk_peek_off); in sk_peek_offset()
682 static inline void sk_peek_offset_bwd(struct sock *sk, int val) in sk_peek_offset_bwd() argument
684 s32 off = READ_ONCE(sk->sk_peek_off); in sk_peek_offset_bwd()
688 WRITE_ONCE(sk->sk_peek_off, off); in sk_peek_offset_bwd()
692 static inline void sk_peek_offset_fwd(struct sock *sk, int val) in sk_peek_offset_fwd() argument
694 sk_peek_offset_bwd(sk, -val); in sk_peek_offset_fwd()
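Together these implement SO_PEEK_OFF: sk_peek_offset() reports where the next MSG_PEEK should start, and the fwd/bwd helpers move that mark as data is peeked or consumed. A recvmsg-side sketch, with "copied" standing for the bytes handled in this call:

    int off = sk_peek_offset(sk, flags);    /* 0 unless MSG_PEEK with
                                               SO_PEEK_OFF enabled */
    /* ... skip 'off' bytes of queued data before copying ... */
    if (flags & MSG_PEEK)
            sk_peek_offset_fwd(sk, copied); /* next peek starts later */
    else
            sk_peek_offset_bwd(sk, copied); /* consumed data rewinds it */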
725 static inline struct sock *sk_next(const struct sock *sk) in sk_next() argument
727 return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node); in sk_next()
730 static inline struct sock *sk_nulls_next(const struct sock *sk) in sk_nulls_next() argument
732 return (!is_a_nulls(sk->sk_nulls_node.next)) ? in sk_nulls_next()
733 hlist_nulls_entry(sk->sk_nulls_node.next, in sk_nulls_next()
738 static inline bool sk_unhashed(const struct sock *sk) in sk_unhashed() argument
740 return hlist_unhashed(&sk->sk_node); in sk_unhashed()
743 static inline bool sk_hashed(const struct sock *sk) in sk_hashed() argument
745 return !sk_unhashed(sk); in sk_hashed()
753 static inline void __sk_del_node(struct sock *sk) in __sk_del_node() argument
755 __hlist_del(&sk->sk_node); in __sk_del_node()
759 static inline bool __sk_del_node_init(struct sock *sk) in __sk_del_node_init() argument
761 if (sk_hashed(sk)) { in __sk_del_node_init()
762 __sk_del_node(sk); in __sk_del_node_init()
763 sk_node_init(&sk->sk_node); in __sk_del_node_init()
775 static __always_inline void sock_hold(struct sock *sk) in sock_hold() argument
777 refcount_inc(&sk->sk_refcnt); in sock_hold()
783 static __always_inline void __sock_put(struct sock *sk) in __sock_put() argument
785 refcount_dec(&sk->sk_refcnt); in __sock_put()
788 static inline bool sk_del_node_init(struct sock *sk) in sk_del_node_init() argument
790 bool rc = __sk_del_node_init(sk); in sk_del_node_init()
794 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); in sk_del_node_init()
795 __sock_put(sk); in sk_del_node_init()
799 #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) argument
801 static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk) in __sk_nulls_del_node_init_rcu() argument
803 if (sk_hashed(sk)) { in __sk_nulls_del_node_init_rcu()
804 hlist_nulls_del_init_rcu(&sk->sk_nulls_node); in __sk_nulls_del_node_init_rcu()
810 static inline bool sk_nulls_del_node_init_rcu(struct sock *sk) in sk_nulls_del_node_init_rcu() argument
812 bool rc = __sk_nulls_del_node_init_rcu(sk); in sk_nulls_del_node_init_rcu()
816 WARN_ON(refcount_read(&sk->sk_refcnt) == 1); in sk_nulls_del_node_init_rcu()
817 __sock_put(sk); in sk_nulls_del_node_init_rcu()
822 static inline void __sk_add_node(struct sock *sk, struct hlist_head *list) in __sk_add_node() argument
824 hlist_add_head(&sk->sk_node, list); in __sk_add_node()
827 static inline void sk_add_node(struct sock *sk, struct hlist_head *list) in sk_add_node() argument
829 sock_hold(sk); in sk_add_node()
830 __sk_add_node(sk, list); in sk_add_node()
833 static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) in sk_add_node_rcu() argument
835 sock_hold(sk); in sk_add_node_rcu()
836 if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && in sk_add_node_rcu()
837 sk->sk_family == AF_INET6) in sk_add_node_rcu()
838 hlist_add_tail_rcu(&sk->sk_node, list); in sk_add_node_rcu()
840 hlist_add_head_rcu(&sk->sk_node, list); in sk_add_node_rcu()
843 static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list) in sk_add_node_tail_rcu() argument
845 sock_hold(sk); in sk_add_node_tail_rcu()
846 hlist_add_tail_rcu(&sk->sk_node, list); in sk_add_node_tail_rcu()
849 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) in __sk_nulls_add_node_rcu() argument
851 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); in __sk_nulls_add_node_rcu()
854 static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list) in __sk_nulls_add_node_tail_rcu() argument
856 hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list); in __sk_nulls_add_node_tail_rcu()
859 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) in sk_nulls_add_node_rcu() argument
861 sock_hold(sk); in sk_nulls_add_node_rcu()
862 __sk_nulls_add_node_rcu(sk, list); in sk_nulls_add_node_rcu()
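The add helpers pair a sock_hold() with the list insertion, so the hash table owns a reference for as long as the socket is linked; the del_node_init helpers drop that reference again when they actually unlink something. A sketch with a hypothetical my_hash_head:

    static struct hlist_head my_hash_head;

    sk_add_node(sk, &my_hash_head); /* sock_hold() + hlist_add_head() */
    /* ... socket is reachable through the hash ... */
    sk_del_node_init(sk);           /* unlinks and puts the list's ref */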
865 static inline void __sk_del_bind_node(struct sock *sk) in __sk_del_bind_node() argument
867 __hlist_del(&sk->sk_bind_node); in __sk_del_bind_node()
870 static inline void sk_add_bind_node(struct sock *sk, in sk_add_bind_node() argument
873 hlist_add_head(&sk->sk_bind_node, list); in sk_add_bind_node()
876 static inline void __sk_del_bind2_node(struct sock *sk) in __sk_del_bind2_node() argument
878 __hlist_del(&sk->sk_bind2_node); in __sk_del_bind2_node()
881 static inline void sk_add_bind2_node(struct sock *sk, struct hlist_head *list) in sk_add_bind2_node() argument
883 hlist_add_head(&sk->sk_bind2_node, list); in sk_add_bind2_node()
920 static inline struct user_namespace *sk_user_ns(const struct sock *sk) in sk_user_ns() argument
926 return sk->sk_socket->file->f_cred->user_ns; in sk_user_ns()
971 static inline void sock_set_flag(struct sock *sk, enum sock_flags flag) in sock_set_flag() argument
973 __set_bit(flag, &sk->sk_flags); in sock_set_flag()
976 static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) in sock_reset_flag() argument
978 __clear_bit(flag, &sk->sk_flags); in sock_reset_flag()
981 static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit, in sock_valbool_flag() argument
985 sock_set_flag(sk, bit); in sock_valbool_flag()
987 sock_reset_flag(sk, bit); in sock_valbool_flag()
990 static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) in sock_flag() argument
992 return test_bit(flag, &sk->sk_flags); in sock_flag()
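The set/reset helpers use non-atomic bitops, so callers must own the socket (lock_sock() or an init path). A sketch mirroring the SO_KEEPALIVE case in net/core/sock.c, with valbool the user-supplied value:

    if (sk->sk_prot->keepalive)
            sk->sk_prot->keepalive(sk, valbool);
    sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);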
1014 static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask) in sk_gfp_mask() argument
1016 return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC); in sk_gfp_mask()
1019 static inline void sk_acceptq_removed(struct sock *sk) in sk_acceptq_removed() argument
1021 WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1); in sk_acceptq_removed()
1024 static inline void sk_acceptq_added(struct sock *sk) in sk_acceptq_added() argument
1026 WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1); in sk_acceptq_added()
1033 static inline bool sk_acceptq_is_full(const struct sock *sk) in sk_acceptq_is_full() argument
1035 return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog); in sk_acceptq_is_full()
1041 static inline int sk_stream_min_wspace(const struct sock *sk) in sk_stream_min_wspace() argument
1043 return READ_ONCE(sk->sk_wmem_queued) >> 1; in sk_stream_min_wspace()
1046 static inline int sk_stream_wspace(const struct sock *sk) in sk_stream_wspace() argument
1048 return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued); in sk_stream_wspace()
1051 static inline void sk_wmem_queued_add(struct sock *sk, int val) in sk_wmem_queued_add() argument
1053 WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val); in sk_wmem_queued_add()
1056 static inline void sk_forward_alloc_add(struct sock *sk, int val) in sk_forward_alloc_add() argument
1059 WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val); in sk_forward_alloc_add()
1062 void sk_stream_write_space(struct sock *sk);
1065 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) in __sk_add_backlog() argument
1070 if (!sk->sk_backlog.tail) in __sk_add_backlog()
1071 WRITE_ONCE(sk->sk_backlog.head, skb); in __sk_add_backlog()
1073 sk->sk_backlog.tail->next = skb; in __sk_add_backlog()
1075 WRITE_ONCE(sk->sk_backlog.tail, skb); in __sk_add_backlog()
1084 static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit) in sk_rcvqueues_full() argument
1086 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); in sk_rcvqueues_full()
1092 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb, in sk_add_backlog() argument
1095 if (sk_rcvqueues_full(sk, limit)) in sk_add_backlog()
1103 if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) in sk_add_backlog()
1106 __sk_add_backlog(sk, skb); in sk_add_backlog()
1107 sk->sk_backlog.len += skb->truesize; in sk_add_backlog()
1111 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
1113 INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb));
1114 INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb));
1116 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) in sk_backlog_rcv() argument
1119 return __sk_backlog_rcv(sk, skb); in sk_backlog_rcv()
1121 return INDIRECT_CALL_INET(sk->sk_backlog_rcv, in sk_backlog_rcv()
1124 sk, skb); in sk_backlog_rcv()
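Softirq receive paths use the pattern below: if the owner task holds the socket, packets are parked on the backlog and replayed by release_sock(); otherwise they are processed immediately through sk_backlog_rcv(). A sketch resembling tcp_v4_rcv(), with "limit" standing in for the protocol's rcvbuf-derived bound:

    int ret = 0;

    bh_lock_sock(sk);
    if (!sock_owned_by_user(sk)) {
            ret = sk_backlog_rcv(sk, skb);          /* process now */
    } else if (sk_add_backlog(sk, skb, limit)) {
            bh_unlock_sock(sk);                     /* queue over limit */
            kfree_skb(skb);
            return -ENOBUFS;
    }
    bh_unlock_sock(sk);
    return ret;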
1127 static inline void sk_incoming_cpu_update(struct sock *sk) in sk_incoming_cpu_update() argument
1131 if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu)) in sk_incoming_cpu_update()
1132 WRITE_ONCE(sk->sk_incoming_cpu, cpu); in sk_incoming_cpu_update()
1147 static inline void sock_rps_record_flow(const struct sock *sk) in sock_rps_record_flow() argument
1161 if (sk->sk_state == TCP_ESTABLISHED) { in sock_rps_record_flow()
1165 sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash)); in sock_rps_record_flow()
1171 static inline void sock_rps_save_rxhash(struct sock *sk, in sock_rps_save_rxhash() argument
1178 if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash)) in sock_rps_save_rxhash()
1179 WRITE_ONCE(sk->sk_rxhash, skb->hash); in sock_rps_save_rxhash()
1183 static inline void sock_rps_reset_rxhash(struct sock *sk) in sock_rps_reset_rxhash() argument
1187 WRITE_ONCE(sk->sk_rxhash, 0); in sock_rps_reset_rxhash()
1206 int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
1207 int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
1208 void sk_stream_wait_close(struct sock *sk, long timeo_p);
1209 int sk_stream_error(struct sock *sk, int flags, int err);
1210 void sk_stream_kill_queues(struct sock *sk);
1211 void sk_set_memalloc(struct sock *sk);
1212 void sk_clear_memalloc(struct sock *sk);
1214 void __sk_flush_backlog(struct sock *sk);
1216 static inline bool sk_flush_backlog(struct sock *sk) in sk_flush_backlog() argument
1218 if (unlikely(READ_ONCE(sk->sk_backlog.tail))) { in sk_flush_backlog()
1219 __sk_flush_backlog(sk); in sk_flush_backlog()
1225 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
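The sk_stream_wait_* helpers sleep with the socket locked and handle signals and timeouts; senders loop on them until memory frees up. A sketch following the shape of tcp_sendmsg_locked():

    long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
    int err;

    while (!sk_stream_memory_free(sk)) {
            err = sk_stream_wait_memory(sk, &timeo);
            if (err)
                    return sk_stream_error(sk, msg->msg_flags, err);
    }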
1239 static inline void sk_prot_clear_nulls(struct sock *sk, int size) in sk_prot_clear_nulls() argument
1242 memset(sk, 0, offsetof(struct sock, sk_node.next)); in sk_prot_clear_nulls()
1243 memset(&sk->sk_node.pprev, 0, in sk_prot_clear_nulls()
1251 void (*close)(struct sock *sk,
1253 int (*pre_connect)(struct sock *sk,
1256 int (*connect)(struct sock *sk,
1259 int (*disconnect)(struct sock *sk, int flags);
1261 struct sock * (*accept)(struct sock *sk, int flags, int *err,
1264 int (*ioctl)(struct sock *sk, int cmd,
1266 int (*init)(struct sock *sk);
1267 void (*destroy)(struct sock *sk);
1268 void (*shutdown)(struct sock *sk, int how);
1269 int (*setsockopt)(struct sock *sk, int level,
1272 int (*getsockopt)(struct sock *sk, int level,
1275 void (*keepalive)(struct sock *sk, int valbool);
1277 int (*compat_ioctl)(struct sock *sk,
1280 int (*sendmsg)(struct sock *sk, struct msghdr *msg,
1282 int (*recvmsg)(struct sock *sk, struct msghdr *msg,
1285 int (*bind)(struct sock *sk,
1287 int (*bind_add)(struct sock *sk,
1290 int (*backlog_rcv) (struct sock *sk,
1295 void (*release_cb)(struct sock *sk);
1298 int (*hash)(struct sock *sk);
1299 void (*unhash)(struct sock *sk);
1300 void (*rehash)(struct sock *sk);
1301 int (*get_port)(struct sock *sk, unsigned short snum);
1302 void (*put_port)(struct sock *sk);
1304 int (*psock_update_sk_prot)(struct sock *sk,
1315 int (*forward_alloc_get)(const struct sock *sk);
1318 bool (*stream_memory_free)(const struct sock *sk, int wake);
1319 bool (*sock_is_readable)(struct sock *sk);
1321 void (*enter_memory_pressure)(struct sock *sk);
1322 void (*leave_memory_pressure)(struct sock *sk);
1369 int (*diag_destroy)(struct sock *sk, int err);
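struct proto is the per-protocol ops table each struct sock points at through sk_prot. A sketch of how a protocol fills one in (fields abridged; the my_* handlers and struct my_sock, which would embed struct sock as its first member, are hypothetical):

    static struct proto my_proto = {
            .name           = "MYPROTO",
            .owner          = THIS_MODULE,
            .close          = my_close,
            .connect        = my_connect,
            .sendmsg        = my_sendmsg,
            .recvmsg        = my_recvmsg,
            .hash           = my_hash,
            .unhash         = my_unhash,
            .obj_size       = sizeof(struct my_sock),
    };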
1376 INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake));
1378 static inline int sk_forward_alloc_get(const struct sock *sk) in sk_forward_alloc_get() argument
1381 if (sk->sk_prot->forward_alloc_get) in sk_forward_alloc_get()
1382 return sk->sk_prot->forward_alloc_get(sk); in sk_forward_alloc_get()
1384 return READ_ONCE(sk->sk_forward_alloc); in sk_forward_alloc_get()
1387 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake) in __sk_stream_memory_free() argument
1389 if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf)) in __sk_stream_memory_free()
1392 return sk->sk_prot->stream_memory_free ? in __sk_stream_memory_free()
1393 INDIRECT_CALL_INET_1(sk->sk_prot->stream_memory_free, in __sk_stream_memory_free()
1394 tcp_stream_memory_free, sk, wake) : true; in __sk_stream_memory_free()
1397 static inline bool sk_stream_memory_free(const struct sock *sk) in sk_stream_memory_free() argument
1399 return __sk_stream_memory_free(sk, 0); in sk_stream_memory_free()
1402 static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake) in __sk_stream_is_writeable() argument
1404 return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && in __sk_stream_is_writeable()
1405 __sk_stream_memory_free(sk, wake); in __sk_stream_is_writeable()
1408 static inline bool sk_stream_is_writeable(const struct sock *sk) in sk_stream_is_writeable() argument
1410 return __sk_stream_is_writeable(sk, 0); in sk_stream_is_writeable()
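Writability for stream sockets combines free send-queue space with the protocol's memory-free test; this is what poll hooks report. A sketch following tcp_poll(), with "mask" the poll result being built:

    if (sk_stream_is_writeable(sk))
            mask |= EPOLLOUT | EPOLLWRNORM;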
1413 static inline int sk_under_cgroup_hierarchy(struct sock *sk, in sk_under_cgroup_hierarchy() argument
1417 return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), in sk_under_cgroup_hierarchy()
1424 static inline bool sk_has_memory_pressure(const struct sock *sk) in sk_has_memory_pressure() argument
1426 return sk->sk_prot->memory_pressure != NULL; in sk_has_memory_pressure()
1429 static inline bool sk_under_global_memory_pressure(const struct sock *sk) in sk_under_global_memory_pressure() argument
1431 return sk->sk_prot->memory_pressure && in sk_under_global_memory_pressure()
1432 !!READ_ONCE(*sk->sk_prot->memory_pressure); in sk_under_global_memory_pressure()
1435 static inline bool sk_under_memory_pressure(const struct sock *sk) in sk_under_memory_pressure() argument
1437 if (!sk->sk_prot->memory_pressure) in sk_under_memory_pressure()
1440 if (mem_cgroup_sockets_enabled && sk->sk_memcg && in sk_under_memory_pressure()
1441 mem_cgroup_under_socket_pressure(sk->sk_memcg)) in sk_under_memory_pressure()
1444 return !!READ_ONCE(*sk->sk_prot->memory_pressure); in sk_under_memory_pressure()
1454 sk_memory_allocated(const struct sock *sk) in sk_memory_allocated() argument
1456 return proto_memory_allocated(sk->sk_prot); in sk_memory_allocated()
1463 sk_memory_allocated_add(struct sock *sk, int amt) in sk_memory_allocated_add() argument
1468 local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt); in sk_memory_allocated_add()
1470 __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve); in sk_memory_allocated_add()
1471 atomic_long_add(local_reserve, sk->sk_prot->memory_allocated); in sk_memory_allocated_add()
1477 sk_memory_allocated_sub(struct sock *sk, int amt) in sk_memory_allocated_sub() argument
1482 local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt); in sk_memory_allocated_sub()
1484 __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve); in sk_memory_allocated_sub()
1485 atomic_long_add(local_reserve, sk->sk_prot->memory_allocated); in sk_memory_allocated_sub()
1492 static inline void sk_sockets_allocated_dec(struct sock *sk) in sk_sockets_allocated_dec() argument
1494 percpu_counter_add_batch(sk->sk_prot->sockets_allocated, -1, in sk_sockets_allocated_dec()
1498 static inline void sk_sockets_allocated_inc(struct sock *sk) in sk_sockets_allocated_inc() argument
1500 percpu_counter_add_batch(sk->sk_prot->sockets_allocated, 1, in sk_sockets_allocated_inc()
1505 sk_sockets_allocated_read_positive(struct sock *sk) in sk_sockets_allocated_read_positive() argument
1507 return percpu_counter_read_positive(sk->sk_prot->sockets_allocated); in sk_sockets_allocated_read_positive()
1560 static inline int __sk_prot_rehash(struct sock *sk) in __sk_prot_rehash() argument
1562 sk->sk_prot->unhash(sk); in __sk_prot_rehash()
1563 return sk->sk_prot->hash(sk); in __sk_prot_rehash()
1597 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
1598 int __sk_mem_schedule(struct sock *sk, int size, int kind);
1599 void __sk_mem_reduce_allocated(struct sock *sk, int amount);
1600 void __sk_mem_reclaim(struct sock *sk, int amount);
1606 static inline long sk_prot_mem_limits(const struct sock *sk, int index) in sk_prot_mem_limits() argument
1608 return READ_ONCE(sk->sk_prot->sysctl_mem[index]); in sk_prot_mem_limits()
1616 static inline bool sk_has_account(struct sock *sk) in sk_has_account() argument
1619 return !!sk->sk_prot->memory_allocated; in sk_has_account()
1622 static inline bool sk_wmem_schedule(struct sock *sk, int size) in sk_wmem_schedule() argument
1626 if (!sk_has_account(sk)) in sk_wmem_schedule()
1628 delta = size - sk->sk_forward_alloc; in sk_wmem_schedule()
1629 return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_SEND); in sk_wmem_schedule()
1633 sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) in sk_rmem_schedule() argument
1637 if (!sk_has_account(sk)) in sk_rmem_schedule()
1639 delta = size - sk->sk_forward_alloc; in sk_rmem_schedule()
1640 return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) || in sk_rmem_schedule()
1644 static inline int sk_unused_reserved_mem(const struct sock *sk) in sk_unused_reserved_mem() argument
1648 if (likely(!sk->sk_reserved_mem)) in sk_unused_reserved_mem()
1651 unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued - in sk_unused_reserved_mem()
1652 atomic_read(&sk->sk_rmem_alloc); in sk_unused_reserved_mem()
1657 static inline void sk_mem_reclaim(struct sock *sk) in sk_mem_reclaim() argument
1661 if (!sk_has_account(sk)) in sk_mem_reclaim()
1664 reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk); in sk_mem_reclaim()
1667 __sk_mem_reclaim(sk, reclaimable); in sk_mem_reclaim()
1670 static inline void sk_mem_reclaim_final(struct sock *sk) in sk_mem_reclaim_final() argument
1672 sk->sk_reserved_mem = 0; in sk_mem_reclaim_final()
1673 sk_mem_reclaim(sk); in sk_mem_reclaim_final()
1676 static inline void sk_mem_charge(struct sock *sk, int size) in sk_mem_charge() argument
1678 if (!sk_has_account(sk)) in sk_mem_charge()
1680 sk_forward_alloc_add(sk, -size); in sk_mem_charge()
1683 static inline void sk_mem_uncharge(struct sock *sk, int size) in sk_mem_uncharge() argument
1685 if (!sk_has_account(sk)) in sk_mem_uncharge()
1687 sk_forward_alloc_add(sk, size); in sk_mem_uncharge()
1688 sk_mem_reclaim(sk); in sk_mem_uncharge()
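The schedule helpers ask the protocol-wide accounting for quota, while sk_mem_charge()/sk_mem_uncharge() move already-scheduled quota against sk_forward_alloc. The receive-side pairing follows __sock_queue_rcv_skb():

    if (!sk_rmem_schedule(sk, skb, skb->truesize))
            return -ENOBUFS;        /* over the memory budget */
    skb_set_owner_r(skb, sk);       /* adds truesize to sk_rmem_alloc
                                       and charges forward_alloc */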
1698 #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ argument
1700 sk->sk_lock.owned = 0; \
1701 init_waitqueue_head(&sk->sk_lock.wq); \
1702 spin_lock_init(&(sk)->sk_lock.slock); \
1703 debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
1704 sizeof((sk)->sk_lock)); \
1705 lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
1707 lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
1710 static inline bool lockdep_sock_is_held(const struct sock *sk) in lockdep_sock_is_held() argument
1712 return lockdep_is_held(&sk->sk_lock) || in lockdep_sock_is_held()
1713 lockdep_is_held(&sk->sk_lock.slock); in lockdep_sock_is_held()
1716 void lock_sock_nested(struct sock *sk, int subclass);
1718 static inline void lock_sock(struct sock *sk) in lock_sock() argument
1720 lock_sock_nested(sk, 0); in lock_sock()
1723 void __lock_sock(struct sock *sk);
1724 void __release_sock(struct sock *sk);
1725 void release_sock(struct sock *sk);
1734 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
1749 static inline bool lock_sock_fast(struct sock *sk) in lock_sock_fast() argument
1752 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); in lock_sock_fast()
1754 return __lock_sock_fast(sk); in lock_sock_fast()
1758 static inline bool lock_sock_fast_nested(struct sock *sk) in lock_sock_fast_nested() argument
1760 mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_); in lock_sock_fast_nested()
1762 return __lock_sock_fast(sk); in lock_sock_fast_nested()
1773 static inline void unlock_sock_fast(struct sock *sk, bool slow) in unlock_sock_fast() argument
1774 __releases(&sk->sk_lock.slock) in unlock_sock_fast()
1777 release_sock(sk); in unlock_sock_fast()
1778 __release(&sk->sk_lock.slock); in unlock_sock_fast()
1780 mutex_release(&sk->sk_lock.dep_map, _RET_IP_); in unlock_sock_fast()
1781 spin_unlock_bh(&sk->sk_lock.slock); in unlock_sock_fast()
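Two locking flavours: lock_sock() is the sleepable owner lock whose release_sock() also drains the backlog, and the lock_sock_fast() pair degrades to just the spinlock when no backlog processing is pending. Sketch:

    lock_sock(sk);
    /* ... sleepable section; softirq traffic queues to the backlog ... */
    release_sock(sk);       /* replays backlog via sk_backlog_rcv() */

    bool slow = lock_sock_fast(sk);
    /* ... short section that must not sleep ... */
    unlock_sock_fast(sk, slow);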
1785 void sockopt_lock_sock(struct sock *sk);
1786 void sockopt_release_sock(struct sock *sk);
1804 static inline void sock_owned_by_me(const struct sock *sk) in sock_owned_by_me() argument
1807 WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks); in sock_owned_by_me()
1811 static inline bool sock_owned_by_user(const struct sock *sk) in sock_owned_by_user() argument
1813 sock_owned_by_me(sk); in sock_owned_by_user()
1814 return sk->sk_lock.owned; in sock_owned_by_user()
1817 static inline bool sock_owned_by_user_nocheck(const struct sock *sk) in sock_owned_by_user_nocheck() argument
1819 return sk->sk_lock.owned; in sock_owned_by_user_nocheck()
1822 static inline void sock_release_ownership(struct sock *sk) in sock_release_ownership() argument
1824 if (sock_owned_by_user_nocheck(sk)) { in sock_release_ownership()
1825 sk->sk_lock.owned = 0; in sock_release_ownership()
1828 mutex_release(&sk->sk_lock.dep_map, _RET_IP_); in sock_release_ownership()
1835 struct sock *sk = (struct sock *)csk; in sock_allow_reclassification() local
1837 return !sock_owned_by_user_nocheck(sk) && in sock_allow_reclassification()
1838 !spin_is_locked(&sk->sk_lock.slock); in sock_allow_reclassification()
1843 void sk_free(struct sock *sk);
1844 void sk_destruct(struct sock *sk);
1845 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1846 void sk_free_unlock_clone(struct sock *sk);
1848 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1852 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
1864 int sk_setsockopt(struct sock *sk, int level, int optname,
1869 int sk_getsockopt(struct sock *sk, int level, int optname,
1875 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1879 static inline struct sk_buff *sock_alloc_send_skb(struct sock *sk, in sock_alloc_send_skb() argument
1883 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0); in sock_alloc_send_skb()
1886 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
1887 void sock_kfree_s(struct sock *sk, void *mem, int size);
1888 void sock_kzfree_s(struct sock *sk, void *mem, int size);
1889 void sk_send_sigurg(struct sock *sk);
1891 static inline void sock_replace_proto(struct sock *sk, struct proto *proto) in sock_replace_proto() argument
1893 if (sk->sk_socket) in sock_replace_proto()
1894 clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); in sock_replace_proto()
1895 WRITE_ONCE(sk->sk_prot, proto); in sock_replace_proto()
1905 const struct sock *sk) in sockcm_init() argument
1908 .tsflags = READ_ONCE(sk->sk_tsflags) in sockcm_init()
1912 int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
1914 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1930 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
1946 void sk_common_release(struct sock *sk);
1953 void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid);
1958 void sock_init_data(struct socket *sock, struct sock *sk);
1986 static inline void sock_put(struct sock *sk) in sock_put() argument
1988 if (refcount_dec_and_test(&sk->sk_refcnt)) in sock_put()
1989 sk_free(sk); in sock_put()
1994 void sock_gen_put(struct sock *sk);
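sock_put() frees on the final reference, but only for full sockets; lookup paths that can also return timewait or request socks drop their reference with sock_gen_put(), which dispatches to the matching destructor. A sketch, with "refcounted" as produced by skb_steal_sock() further below:

    if (refcounted)
            sock_gen_put(sk);       /* safe for tw/req socks too */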
1996 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
1998 static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, in sk_receive_skb() argument
2001 return __sk_receive_skb(sk, skb, nested, 1, true); in sk_receive_skb()
2004 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) in sk_tx_queue_set() argument
2009 sk->sk_tx_queue_mapping = tx_queue; in sk_tx_queue_set()
2014 static inline void sk_tx_queue_clear(struct sock *sk) in sk_tx_queue_clear() argument
2016 sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING; in sk_tx_queue_clear()
2019 static inline int sk_tx_queue_get(const struct sock *sk) in sk_tx_queue_get() argument
2021 if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING) in sk_tx_queue_get()
2022 return sk->sk_tx_queue_mapping; in sk_tx_queue_get()
2027 static inline void __sk_rx_queue_set(struct sock *sk, in __sk_rx_queue_set() argument
2036 unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue)) in __sk_rx_queue_set()
2037 WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue); in __sk_rx_queue_set()
2042 static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb) in sk_rx_queue_set() argument
2044 __sk_rx_queue_set(sk, skb, true); in sk_rx_queue_set()
2047 static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb) in sk_rx_queue_update() argument
2049 __sk_rx_queue_set(sk, skb, false); in sk_rx_queue_update()
2052 static inline void sk_rx_queue_clear(struct sock *sk) in sk_rx_queue_clear() argument
2055 WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING); in sk_rx_queue_clear()
2059 static inline int sk_rx_queue_get(const struct sock *sk) in sk_rx_queue_get() argument
2062 if (sk) { in sk_rx_queue_get()
2063 int res = READ_ONCE(sk->sk_rx_queue_mapping); in sk_rx_queue_get()
2073 static inline void sk_set_socket(struct sock *sk, struct socket *sock) in sk_set_socket() argument
2075 sk->sk_socket = sock; in sk_set_socket()
2078 static inline wait_queue_head_t *sk_sleep(struct sock *sk) in sk_sleep() argument
2081 return &rcu_dereference_raw(sk->sk_wq)->wait; in sk_sleep()
2090 static inline void sock_orphan(struct sock *sk) in sock_orphan() argument
2092 write_lock_bh(&sk->sk_callback_lock); in sock_orphan()
2093 sock_set_flag(sk, SOCK_DEAD); in sock_orphan()
2094 sk_set_socket(sk, NULL); in sock_orphan()
2095 sk->sk_wq = NULL; in sock_orphan()
2096 write_unlock_bh(&sk->sk_callback_lock); in sock_orphan()
2099 static inline void sock_graft(struct sock *sk, struct socket *parent) in sock_graft() argument
2101 WARN_ON(parent->sk); in sock_graft()
2102 write_lock_bh(&sk->sk_callback_lock); in sock_graft()
2103 rcu_assign_pointer(sk->sk_wq, &parent->wq); in sock_graft()
2104 parent->sk = sk; in sock_graft()
2105 sk_set_socket(sk, parent); in sock_graft()
2106 sk->sk_uid = SOCK_INODE(parent)->i_uid; in sock_graft()
2107 security_sock_graft(sk, parent); in sock_graft()
2108 write_unlock_bh(&sk->sk_callback_lock); in sock_graft()
2111 kuid_t sock_i_uid(struct sock *sk);
2112 unsigned long __sock_i_ino(struct sock *sk);
2113 unsigned long sock_i_ino(struct sock *sk);
2115 static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk) in sock_net_uid() argument
2117 return sk ? sk->sk_uid : make_kuid(net->user_ns, 0); in sock_net_uid()
2127 static inline void sk_set_txhash(struct sock *sk) in sk_set_txhash() argument
2130 WRITE_ONCE(sk->sk_txhash, net_tx_rndhash()); in sk_set_txhash()
2133 static inline bool sk_rethink_txhash(struct sock *sk) in sk_rethink_txhash() argument
2135 if (sk->sk_txhash && sk->sk_txrehash == SOCK_TXREHASH_ENABLED) { in sk_rethink_txhash()
2136 sk_set_txhash(sk); in sk_rethink_txhash()
2143 __sk_dst_get(struct sock *sk) in __sk_dst_get() argument
2145 return rcu_dereference_check(sk->sk_dst_cache, in __sk_dst_get()
2146 lockdep_sock_is_held(sk)); in __sk_dst_get()
2150 sk_dst_get(struct sock *sk) in sk_dst_get() argument
2155 dst = rcu_dereference(sk->sk_dst_cache); in sk_dst_get()
2162 static inline void __dst_negative_advice(struct sock *sk) in __dst_negative_advice() argument
2164 struct dst_entry *ndst, *dst = __sk_dst_get(sk); in __dst_negative_advice()
2170 rcu_assign_pointer(sk->sk_dst_cache, ndst); in __dst_negative_advice()
2171 sk_tx_queue_clear(sk); in __dst_negative_advice()
2172 sk->sk_dst_pending_confirm = 0; in __dst_negative_advice()
2177 static inline void dst_negative_advice(struct sock *sk) in dst_negative_advice() argument
2179 sk_rethink_txhash(sk); in dst_negative_advice()
2180 __dst_negative_advice(sk); in dst_negative_advice()
2184 __sk_dst_set(struct sock *sk, struct dst_entry *dst) in __sk_dst_set() argument
2188 sk_tx_queue_clear(sk); in __sk_dst_set()
2189 sk->sk_dst_pending_confirm = 0; in __sk_dst_set()
2190 old_dst = rcu_dereference_protected(sk->sk_dst_cache, in __sk_dst_set()
2191 lockdep_sock_is_held(sk)); in __sk_dst_set()
2192 rcu_assign_pointer(sk->sk_dst_cache, dst); in __sk_dst_set()
2197 sk_dst_set(struct sock *sk, struct dst_entry *dst) in sk_dst_set() argument
2201 sk_tx_queue_clear(sk); in sk_dst_set()
2202 sk->sk_dst_pending_confirm = 0; in sk_dst_set()
2203 old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst); in sk_dst_set()
2208 __sk_dst_reset(struct sock *sk) in __sk_dst_reset() argument
2210 __sk_dst_set(sk, NULL); in __sk_dst_reset()
2214 sk_dst_reset(struct sock *sk) in sk_dst_reset() argument
2216 sk_dst_set(sk, NULL); in sk_dst_reset()
2219 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
2221 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
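The dst cache keeps the routing decision per socket; the check helpers revalidate it against a cookie and return NULL once the route has gone stale. A sketch of the locked TX-path idiom, with my_route_lookup a hypothetical stand-in for the protocol's route resolution:

    struct dst_entry *dst = __sk_dst_check(sk, 0);

    if (!dst) {
            dst = my_route_lookup(sk);
            if (IS_ERR(dst))
                    return PTR_ERR(dst);
            __sk_dst_set(sk, dst);
    }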
2223 static inline void sk_dst_confirm(struct sock *sk) in sk_dst_confirm() argument
2225 if (!READ_ONCE(sk->sk_dst_pending_confirm)) in sk_dst_confirm()
2226 WRITE_ONCE(sk->sk_dst_pending_confirm, 1); in sk_dst_confirm()
2232 struct sock *sk = skb->sk; in sock_confirm_neigh() local
2234 if (sk && READ_ONCE(sk->sk_dst_pending_confirm)) in sock_confirm_neigh()
2235 WRITE_ONCE(sk->sk_dst_pending_confirm, 0); in sock_confirm_neigh()
2240 bool sk_mc_loop(struct sock *sk);
2242 static inline bool sk_can_gso(const struct sock *sk) in sk_can_gso() argument
2244 return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); in sk_can_gso()
2247 void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
2249 static inline void sk_gso_disable(struct sock *sk) in sk_gso_disable() argument
2251 sk->sk_gso_disabled = 1; in sk_gso_disable()
2252 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; in sk_gso_disable()
2255 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb, in skb_do_copy_data_nocache() argument
2264 } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) { in skb_do_copy_data_nocache()
2273 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb, in skb_add_data_nocache() argument
2278 err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy), in skb_add_data_nocache()
2286 static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from, in skb_copy_to_page_nocache() argument
2293 err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off, in skb_copy_to_page_nocache()
2299 sk_wmem_queued_add(sk, copy); in skb_copy_to_page_nocache()
2300 sk_mem_charge(sk, copy); in skb_copy_to_page_nocache()
2310 static inline int sk_wmem_alloc_get(const struct sock *sk) in sk_wmem_alloc_get() argument
2312 return refcount_read(&sk->sk_wmem_alloc) - 1; in sk_wmem_alloc_get()
2321 static inline int sk_rmem_alloc_get(const struct sock *sk) in sk_rmem_alloc_get() argument
2323 return atomic_read(&sk->sk_rmem_alloc); in sk_rmem_alloc_get()
2332 static inline bool sk_has_allocations(const struct sock *sk) in sk_has_allocations() argument
2334 return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); in sk_has_allocations()
2395 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) in skb_set_hash_from_sk() argument
2398 u32 txhash = READ_ONCE(sk->sk_txhash); in skb_set_hash_from_sk()
2406 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
2416 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) in skb_set_owner_r() argument
2419 skb->sk = sk; in skb_set_owner_r()
2421 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in skb_set_owner_r()
2422 sk_mem_charge(sk, skb->truesize); in skb_set_owner_r()
2425 static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk) in skb_set_owner_sk_safe() argument
2427 if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) { in skb_set_owner_sk_safe()
2430 skb->sk = sk; in skb_set_owner_sk_safe()
2436 static inline struct sk_buff *skb_clone_and_charge_r(struct sk_buff *skb, struct sock *sk) in skb_clone_and_charge_r() argument
2438 skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); in skb_clone_and_charge_r()
2440 if (sk_rmem_schedule(sk, skb, skb->truesize)) { in skb_clone_and_charge_r()
2441 skb_set_owner_r(skb, sk); in skb_clone_and_charge_r()
2458 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
2461 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
2463 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
2465 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
2467 void (*destructor)(struct sock *sk,
2469 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2471 int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
2474 static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_rcv_skb() argument
2476 return sock_queue_rcv_skb_reason(sk, skb, NULL); in sock_queue_rcv_skb()
2479 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
2480 struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
2486 static inline int sock_error(struct sock *sk) in sock_error() argument
2493 if (likely(data_race(!sk->sk_err))) in sock_error()
2496 err = xchg(&sk->sk_err, 0); in sock_error()
2500 void sk_error_report(struct sock *sk);
2502 static inline unsigned long sock_wspace(struct sock *sk) in sock_wspace() argument
2506 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { in sock_wspace()
2507 amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc); in sock_wspace()
2518 static inline void sk_set_bit(int nr, struct sock *sk) in sk_set_bit() argument
2521 !sock_flag(sk, SOCK_FASYNC)) in sk_set_bit()
2524 set_bit(nr, &sk->sk_wq_raw->flags); in sk_set_bit()
2527 static inline void sk_clear_bit(int nr, struct sock *sk) in sk_clear_bit() argument
2530 !sock_flag(sk, SOCK_FASYNC)) in sk_clear_bit()
2533 clear_bit(nr, &sk->sk_wq_raw->flags); in sk_clear_bit()
2536 static inline void sk_wake_async(const struct sock *sk, int how, int band) in sk_wake_async() argument
2538 if (sock_flag(sk, SOCK_FASYNC)) { in sk_wake_async()
2540 sock_wake_async(rcu_dereference(sk->sk_wq), how, band); in sk_wake_async()
2555 static inline void sk_stream_moderate_sndbuf(struct sock *sk) in sk_stream_moderate_sndbuf() argument
2559 if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) in sk_stream_moderate_sndbuf()
2562 val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); in sk_stream_moderate_sndbuf()
2563 val = max_t(u32, val, sk_unused_reserved_mem(sk)); in sk_stream_moderate_sndbuf()
2565 WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF)); in sk_stream_moderate_sndbuf()
2584 static inline struct page_frag *sk_page_frag(struct sock *sk) in sk_page_frag() argument
2586 if (sk->sk_use_task_frag) in sk_page_frag()
2589 return &sk->sk_frag; in sk_page_frag()
2592 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
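sk_page_frag() picks either the task-local or the per-socket page fragment allocator for copying payload, and refill may fail under memory pressure. A sketch following tcp_sendmsg_locked(), with wait_for_memory a hypothetical label:

    struct page_frag *pfrag = sk_page_frag(sk);

    if (!sk_page_frag_refill(sk, pfrag))
            goto wait_for_memory;
    err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
                                   pfrag->page, pfrag->offset, copy);
    if (!err)
            pfrag->offset += copy;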
2597 static inline bool sock_writeable(const struct sock *sk) in sock_writeable() argument
2599 return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1); in sock_writeable()
2612 static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) in sock_rcvtimeo() argument
2614 return noblock ? 0 : sk->sk_rcvtimeo; in sock_rcvtimeo()
2617 static inline long sock_sndtimeo(const struct sock *sk, bool noblock) in sock_sndtimeo() argument
2619 return noblock ? 0 : sk->sk_sndtimeo; in sock_sndtimeo()
2622 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len) in sock_rcvlowat() argument
2624 int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len); in sock_rcvlowat()
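The timeout helpers turn MSG_DONTWAIT into a zero timeout, and sock_rcvlowat() yields the wakeup target. A blocking-receive sketch around sk_wait_data(), which sleeps with the socket locked:

    long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
    int err;

    while (skb_queue_empty(&sk->sk_receive_queue)) {
            if (!timeo)
                    return -EAGAIN;
            err = sk_wait_data(sk, &timeo, NULL);
            if (err < 0)
                    return err;
    }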
2655 sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb) in sock_skb_set_dropcount() argument
2657 SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ? in sock_skb_set_dropcount()
2658 atomic_read(&sk->sk_drops) : 0; in sock_skb_set_dropcount()
2661 static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb) in sk_drops_add() argument
2665 atomic_add(segs, &sk->sk_drops); in sk_drops_add()
2668 static inline ktime_t sock_read_timestamp(struct sock *sk) in sock_read_timestamp() argument
2675 seq = read_seqbegin(&sk->sk_stamp_seq); in sock_read_timestamp()
2676 kt = sk->sk_stamp; in sock_read_timestamp()
2677 } while (read_seqretry(&sk->sk_stamp_seq, seq)); in sock_read_timestamp()
2681 return READ_ONCE(sk->sk_stamp); in sock_read_timestamp()
2685 static inline void sock_write_timestamp(struct sock *sk, ktime_t kt) in sock_write_timestamp() argument
2688 write_seqlock(&sk->sk_stamp_seq); in sock_write_timestamp()
2689 sk->sk_stamp = kt; in sock_write_timestamp()
2690 write_sequnlock(&sk->sk_stamp_seq); in sock_write_timestamp()
2692 WRITE_ONCE(sk->sk_stamp, kt); in sock_write_timestamp()
2696 void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
2698 void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
2702 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) in sock_recv_timestamp() argument
2705 u32 tsflags = READ_ONCE(sk->sk_tsflags); in sock_recv_timestamp()
2713 if (sock_flag(sk, SOCK_RCVTSTAMP) || in sock_recv_timestamp()
2718 __sock_recv_timestamp(msg, sk, skb); in sock_recv_timestamp()
2720 sock_write_timestamp(sk, kt); in sock_recv_timestamp()
2722 if (sock_flag(sk, SOCK_WIFI_STATUS) && skb_wifi_acked_valid(skb)) in sock_recv_timestamp()
2723 __sock_recv_wifi_status(msg, sk, skb); in sock_recv_timestamp()
2726 void __sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
2730 static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk, in sock_recv_cmsgs() argument
2739 if (sk->sk_flags & FLAGS_RECV_CMSGS || in sock_recv_cmsgs()
2740 READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY) in sock_recv_cmsgs()
2741 __sock_recv_cmsgs(msg, sk, skb); in sock_recv_cmsgs()
2742 else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP))) in sock_recv_cmsgs()
2743 sock_write_timestamp(sk, skb->tstamp); in sock_recv_cmsgs()
2744 else if (unlikely(sock_read_timestamp(sk) == SK_DEFAULT_STAMP)) in sock_recv_cmsgs()
2745 sock_write_timestamp(sk, 0); in sock_recv_cmsgs()
2759 static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags, in _sock_tx_timestamp() argument
2766 *tskey = atomic_inc_return(&sk->sk_tskey) - 1; in _sock_tx_timestamp()
2768 if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) in _sock_tx_timestamp()
2772 static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags, in sock_tx_timestamp() argument
2775 _sock_tx_timestamp(sk, tsflags, tx_flags, NULL); in sock_tx_timestamp()
2780 _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags, in skb_setup_tx_timestamp()
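On transmit, sockcm_init() seeds the per-call cookie from the socket's tsflags, cmsgs may override it, and sock_tx_timestamp() finally translates the flags onto the skb. A sendmsg-path sketch:

    struct sockcm_cookie sockc;
    int err;

    sockcm_init(&sockc, sk);        /* tsflags seeded from sk_tsflags */
    if (msg->msg_controllen) {
            err = sock_cmsg_send(sk, msg, &sockc);
            if (err)
                    return err;
    }
    /* ... build the skb ... */
    sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);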
2784 static inline bool sk_is_tcp(const struct sock *sk) in sk_is_tcp() argument
2786 return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP; in sk_is_tcp()
2797 static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) in sk_eat_skb() argument
2799 __skb_unlink(skb, &sk->sk_receive_queue); in sk_eat_skb()
2816 static inline bool sk_fullsock(const struct sock *sk) in sk_fullsock() argument
2818 return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV); in sk_fullsock()
2822 sk_is_refcounted(struct sock *sk) in sk_is_refcounted() argument
2825 return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE); in sk_is_refcounted()
2837 if (skb->sk) { in skb_steal_sock()
2838 struct sock *sk = skb->sk; in skb_steal_sock() local
2843 *refcounted = sk_is_refcounted(sk); in skb_steal_sock()
2845 skb->sk = NULL; in skb_steal_sock()
2846 return sk; in skb_steal_sock()
2861 struct sock *sk = skb->sk; in sk_validate_xmit_skb() local
2863 if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) { in sk_validate_xmit_skb()
2864 skb = sk->sk_validate_xmit_skb(sk, dev, skb); in sk_validate_xmit_skb()
2880 static inline bool sk_listener(const struct sock *sk) in sk_listener() argument
2882 return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); in sk_listener()
2885 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
2886 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
2889 bool sk_ns_capable(const struct sock *sk,
2891 bool sk_capable(const struct sock *sk, int cap);
2892 bool sk_net_capable(const struct sock *sk, int cap);
2894 void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
2918 static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto) in sk_get_wmem0() argument
2922 return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset)); in sk_get_wmem0()
2927 static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto) in sk_get_rmem0() argument
2931 return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset)); in sk_get_rmem0()
2940 static inline void sk_pacing_shift_update(struct sock *sk, int val) in sk_pacing_shift_update() argument
2942 if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val) in sk_pacing_shift_update()
2944 WRITE_ONCE(sk->sk_pacing_shift, val); in sk_pacing_shift_update()
2952 static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif) in sk_dev_equal_l3scope() argument
2954 int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); in sk_dev_equal_l3scope()
2960 mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif); in sk_dev_equal_l3scope()
2967 void sock_def_readable(struct sock *sk);
2969 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
2970 void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
2971 int sock_set_timestamping(struct sock *sk, int optname,
2974 void sock_enable_timestamps(struct sock *sk);
2975 void sock_no_linger(struct sock *sk);
2976 void sock_set_keepalive(struct sock *sk);
2977 void sock_set_priority(struct sock *sk, u32 priority);
2978 void sock_set_rcvbuf(struct sock *sk, int val);
2979 void sock_set_mark(struct sock *sk, u32 val);
2980 void sock_set_reuseaddr(struct sock *sk);
2981 void sock_set_reuseport(struct sock *sk);
2982 void sock_set_sndtimeo(struct sock *sk, s64 secs);
2984 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
2990 int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
2992 int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
2993 static inline bool sk_is_readable(struct sock *sk) in sk_is_readable() argument
2995 if (sk->sk_prot->sock_is_readable) in sk_is_readable()
2996 return sk->sk_prot->sock_is_readable(sk); in sk_is_readable()