Lines matching refs: sk — identifier cross-reference for net/core/sock.c. Each entry gives the source line number, the matching line, the enclosing function, and whether sk is an argument or a local there.
161 bool sk_ns_capable(const struct sock *sk, in sk_ns_capable() argument
164 return file_ns_capable(sk->sk_socket->file, user_ns, cap) && in sk_ns_capable()
178 bool sk_capable(const struct sock *sk, int cap) in sk_capable() argument
180 return sk_ns_capable(sk, &init_user_ns, cap); in sk_capable()
193 bool sk_net_capable(const struct sock *sk, int cap) in sk_net_capable() argument
195 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap); in sk_net_capable()
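Note: these three wrappers differ only in which user namespace the capability is checked against — the namespace passed by the caller (sk_ns_capable), the initial namespace (sk_capable), or the namespace owning the socket's netns (sk_net_capable). A minimal sketch of the intended use; example_set_mark is a hypothetical caller, analogous to the ns_capable() check in the SO_MARK branch at line 953:

    /* Hypothetical: gate a privileged per-socket setting on the netns owner. */
    static int example_set_mark(struct sock *sk, u32 val)
    {
            if (!sk_net_capable(sk, CAP_NET_ADMIN))
                    return -EPERM;          /* no CAP_NET_ADMIN in sk's netns */
            lock_sock(sk);
            sk->sk_mark = val;
            release_sock(sk);
            return 0;
    }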
297 void sk_set_memalloc(struct sock *sk) in sk_set_memalloc() argument
299 sock_set_flag(sk, SOCK_MEMALLOC); in sk_set_memalloc()
300 sk->sk_allocation |= __GFP_MEMALLOC; in sk_set_memalloc()
305 void sk_clear_memalloc(struct sock *sk) in sk_clear_memalloc() argument
307 sock_reset_flag(sk, SOCK_MEMALLOC); in sk_clear_memalloc()
308 sk->sk_allocation &= ~__GFP_MEMALLOC; in sk_clear_memalloc()
318 sk_mem_reclaim(sk); in sk_clear_memalloc()
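Note: the memalloc pair exists for sockets that carry swap traffic (e.g. swap over NBD or NFS); SOCK_MEMALLOC lets their allocations dip into PFMEMALLOC reserves. A hedged sketch of the pairing (the example_* names are invented):

    /* Hypothetical: mark a swap-transport socket for its lifetime. Clearing
     * the flag also reclaims forward-charged memory (sk_mem_reclaim, line 318). */
    static void example_swap_transport_open(struct sock *sk)
    {
            sk_set_memalloc(sk);    /* SOCK_MEMALLOC + __GFP_MEMALLOC */
    }

    static void example_swap_transport_close(struct sock *sk)
    {
            sk_clear_memalloc(sk);
    }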
322 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) in __sk_backlog_rcv() argument
328 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC)); in __sk_backlog_rcv()
331 ret = sk->sk_backlog_rcv(sk, skb); in __sk_backlog_rcv()
380 static bool sock_needs_netstamp(const struct sock *sk) in sock_needs_netstamp() argument
382 switch (sk->sk_family) { in sock_needs_netstamp()
391 static void sock_disable_timestamp(struct sock *sk, unsigned long flags) in sock_disable_timestamp() argument
393 if (sk->sk_flags & flags) { in sock_disable_timestamp()
394 sk->sk_flags &= ~flags; in sock_disable_timestamp()
395 if (sock_needs_netstamp(sk) && in sock_disable_timestamp()
396 !(sk->sk_flags & SK_FLAGS_TIMESTAMP)) in sock_disable_timestamp()
402 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in __sock_queue_rcv_skb() argument
405 struct sk_buff_head *list = &sk->sk_receive_queue; in __sock_queue_rcv_skb()
407 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { in __sock_queue_rcv_skb()
408 atomic_inc(&sk->sk_drops); in __sock_queue_rcv_skb()
409 trace_sock_rcvqueue_full(sk, skb); in __sock_queue_rcv_skb()
413 if (!sk_rmem_schedule(sk, skb, skb->truesize)) { in __sock_queue_rcv_skb()
414 atomic_inc(&sk->sk_drops); in __sock_queue_rcv_skb()
419 skb_set_owner_r(skb, sk); in __sock_queue_rcv_skb()
427 sock_skb_set_dropcount(sk, skb); in __sock_queue_rcv_skb()
431 if (!sock_flag(sk, SOCK_DEAD)) in __sock_queue_rcv_skb()
432 sk->sk_data_ready(sk); in __sock_queue_rcv_skb()
437 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_rcv_skb() argument
441 err = sk_filter(sk, skb); in sock_queue_rcv_skb()
445 return __sock_queue_rcv_skb(sk, skb); in sock_queue_rcv_skb()
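Note: sock_queue_rcv_skb() runs the socket filter, then charges receive memory and queues via __sock_queue_rcv_skb(); on failure the skb is not freed. A sketch of a datagram protocol's receive hook (example_proto_rcv is invented):

    static int example_proto_rcv(struct sock *sk, struct sk_buff *skb)
    {
            int err = sock_queue_rcv_skb(sk, skb);

            if (err < 0) {          /* -ENOMEM/-ENOBUFS under buffer pressure,
                                     * filter verdicts come back as errors */
                    kfree_skb(skb); /* caller still owns the skb on failure */
                    return err;
            }
            return 0;               /* sk_data_ready() has woken the reader */
    }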
449 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, in __sk_receive_skb() argument
454 if (sk_filter_trim_cap(sk, skb, trim_cap)) in __sk_receive_skb()
459 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { in __sk_receive_skb()
460 atomic_inc(&sk->sk_drops); in __sk_receive_skb()
464 bh_lock_sock_nested(sk); in __sk_receive_skb()
466 bh_lock_sock(sk); in __sk_receive_skb()
467 if (!sock_owned_by_user(sk)) { in __sk_receive_skb()
471 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); in __sk_receive_skb()
473 rc = sk_backlog_rcv(sk, skb); in __sk_receive_skb()
475 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); in __sk_receive_skb()
476 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { in __sk_receive_skb()
477 bh_unlock_sock(sk); in __sk_receive_skb()
478 atomic_inc(&sk->sk_drops); in __sk_receive_skb()
482 bh_unlock_sock(sk); in __sk_receive_skb()
485 sock_put(sk); in __sk_receive_skb()
493 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) in __sk_dst_check() argument
495 struct dst_entry *dst = __sk_dst_get(sk); in __sk_dst_check()
498 sk_tx_queue_clear(sk); in __sk_dst_check()
499 sk->sk_dst_pending_confirm = 0; in __sk_dst_check()
500 RCU_INIT_POINTER(sk->sk_dst_cache, NULL); in __sk_dst_check()
509 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie) in sk_dst_check() argument
511 struct dst_entry *dst = sk_dst_get(sk); in sk_dst_check()
514 sk_dst_reset(sk); in sk_dst_check()
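Note: both check helpers revalidate a cached route via dst->ops->check(dst, cookie) once dst->obsolete is set; on a stale route they clear the cache so the caller re-routes. A hypothetical transmit-path sketch:

    static struct dst_entry *example_route_output(struct sock *sk, u32 cookie)
    {
            struct dst_entry *dst = sk_dst_check(sk, cookie);

            if (!dst) {
                    /* stale or never cached: do a fresh lookup here,
                     * then store it with sk_dst_set(sk, dst) */
            }
            return dst;
    }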
523 static int sock_setbindtodevice(struct sock *sk, char __user *optval, in sock_setbindtodevice() argument
528 struct net *net = sock_net(sk); in sock_setbindtodevice()
568 lock_sock(sk); in sock_setbindtodevice()
569 sk->sk_bound_dev_if = index; in sock_setbindtodevice()
570 sk_dst_reset(sk); in sock_setbindtodevice()
571 release_sock(sk); in sock_setbindtodevice()
581 static int sock_getbindtodevice(struct sock *sk, char __user *optval, in sock_getbindtodevice() argument
586 struct net *net = sock_net(sk); in sock_getbindtodevice()
589 if (sk->sk_bound_dev_if == 0) { in sock_getbindtodevice()
598 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if); in sock_getbindtodevice()
621 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) in sock_valbool_flag() argument
624 sock_set_flag(sk, bit); in sock_valbool_flag()
626 sock_reset_flag(sk, bit); in sock_valbool_flag()
629 bool sk_mc_loop(struct sock *sk) in sk_mc_loop() argument
633 if (!sk) in sk_mc_loop()
635 switch (sk->sk_family) { in sk_mc_loop()
637 return inet_sk(sk)->mc_loop; in sk_mc_loop()
640 return inet6_sk(sk)->mc_loop; in sk_mc_loop()
657 struct sock *sk = sock->sk; in sock_setsockopt() local
668 return sock_setbindtodevice(sk, optval, optlen); in sock_setsockopt()
678 lock_sock(sk); in sock_setsockopt()
685 sock_valbool_flag(sk, SOCK_DBG, valbool); in sock_setsockopt()
688 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); in sock_setsockopt()
691 sk->sk_reuseport = valbool; in sock_setsockopt()
700 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); in sock_setsockopt()
703 sock_valbool_flag(sk, SOCK_BROADCAST, valbool); in sock_setsockopt()
713 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; in sock_setsockopt()
714 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); in sock_setsockopt()
716 sk->sk_write_space(sk); in sock_setsockopt()
734 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; in sock_setsockopt()
750 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); in sock_setsockopt()
761 if (sk->sk_prot->keepalive) in sock_setsockopt()
762 sk->sk_prot->keepalive(sk, valbool); in sock_setsockopt()
763 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); in sock_setsockopt()
767 sock_valbool_flag(sk, SOCK_URGINLINE, valbool); in sock_setsockopt()
771 sk->sk_no_check_tx = valbool; in sock_setsockopt()
776 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) in sock_setsockopt()
777 sk->sk_priority = val; in sock_setsockopt()
792 sock_reset_flag(sk, SOCK_LINGER); in sock_setsockopt()
796 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; in sock_setsockopt()
799 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ; in sock_setsockopt()
800 sock_set_flag(sk, SOCK_LINGER); in sock_setsockopt()
819 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); in sock_setsockopt()
821 sock_set_flag(sk, SOCK_RCVTSTAMPNS); in sock_setsockopt()
822 sock_set_flag(sk, SOCK_RCVTSTAMP); in sock_setsockopt()
823 sock_enable_timestamp(sk, SOCK_TIMESTAMP); in sock_setsockopt()
825 sock_reset_flag(sk, SOCK_RCVTSTAMP); in sock_setsockopt()
826 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); in sock_setsockopt()
837 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { in sock_setsockopt()
838 if (sk->sk_protocol == IPPROTO_TCP && in sock_setsockopt()
839 sk->sk_type == SOCK_STREAM) { in sock_setsockopt()
840 if ((1 << sk->sk_state) & in sock_setsockopt()
845 sk->sk_tskey = tcp_sk(sk)->snd_una; in sock_setsockopt()
847 sk->sk_tskey = 0; in sock_setsockopt()
857 sk->sk_tsflags = val; in sock_setsockopt()
859 sock_enable_timestamp(sk, in sock_setsockopt()
862 sock_disable_timestamp(sk, in sock_setsockopt()
870 ret = sock->ops->set_rcvlowat(sk, val); in sock_setsockopt()
872 sk->sk_rcvlowat = val ? : 1; in sock_setsockopt()
876 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen); in sock_setsockopt()
880 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen); in sock_setsockopt()
892 ret = sk_attach_filter(&fprog, sk); in sock_setsockopt()
905 ret = sk_attach_bpf(ufd, sk); in sock_setsockopt()
918 ret = sk_reuseport_attach_filter(&fprog, sk); in sock_setsockopt()
931 ret = sk_reuseport_attach_bpf(ufd, sk); in sock_setsockopt()
936 ret = sk_detach_filter(sk); in sock_setsockopt()
940 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool) in sock_setsockopt()
943 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool); in sock_setsockopt()
953 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) in sock_setsockopt()
956 sk->sk_mark = val; in sock_setsockopt()
960 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); in sock_setsockopt()
964 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); in sock_setsockopt()
969 ret = sock->ops->set_peek_off(sk, val); in sock_setsockopt()
975 sock_valbool_flag(sk, SOCK_NOFCS, valbool); in sock_setsockopt()
979 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool); in sock_setsockopt()
985 if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN)) in sock_setsockopt()
991 sk->sk_ll_usec = val; in sock_setsockopt()
998 cmpxchg(&sk->sk_pacing_status, in sock_setsockopt()
1001 sk->sk_max_pacing_rate = val; in sock_setsockopt()
1002 sk->sk_pacing_rate = min(sk->sk_pacing_rate, in sock_setsockopt()
1003 sk->sk_max_pacing_rate); in sock_setsockopt()
1007 sk->sk_incoming_cpu = val; in sock_setsockopt()
1012 dst_negative_advice(sk); in sock_setsockopt()
1016 if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) { in sock_setsockopt()
1017 if (sk->sk_protocol != IPPROTO_TCP) in sock_setsockopt()
1019 } else if (sk->sk_family != PF_RDS) { in sock_setsockopt()
1026 sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool); in sock_setsockopt()
1031 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { in sock_setsockopt()
1041 sock_valbool_flag(sk, SOCK_TXTIME, true); in sock_setsockopt()
1042 sk->sk_clockid = sk_txtime.clockid; in sock_setsockopt()
1043 sk->sk_txtime_deadline_mode = in sock_setsockopt()
1045 sk->sk_txtime_report_errors = in sock_setsockopt()
1054 release_sock(sk); in sock_setsockopt()
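Note: the SO_SNDBUF/SO_RCVBUF branches above store double the requested value (lines 714 and 750) to cover kernel bookkeeping overhead, after capping at sysctl_wmem_max/sysctl_rmem_max. A userspace demonstration (values are illustrative):

    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            int val = 65536, out;
            socklen_t len = sizeof(out);

            setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
            getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
            printf("asked for %d, kernel kept %d\n", val, out); /* ~131072 */
            return 0;
    }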
1088 struct sock *sk = sock->sk; in sock_getsockopt() local
1110 v.val = sock_flag(sk, SOCK_DBG); in sock_getsockopt()
1114 v.val = sock_flag(sk, SOCK_LOCALROUTE); in sock_getsockopt()
1118 v.val = sock_flag(sk, SOCK_BROADCAST); in sock_getsockopt()
1122 v.val = sk->sk_sndbuf; in sock_getsockopt()
1126 v.val = sk->sk_rcvbuf; in sock_getsockopt()
1130 v.val = sk->sk_reuse; in sock_getsockopt()
1134 v.val = sk->sk_reuseport; in sock_getsockopt()
1138 v.val = sock_flag(sk, SOCK_KEEPOPEN); in sock_getsockopt()
1142 v.val = sk->sk_type; in sock_getsockopt()
1146 v.val = sk->sk_protocol; in sock_getsockopt()
1150 v.val = sk->sk_family; in sock_getsockopt()
1154 v.val = -sock_error(sk); in sock_getsockopt()
1156 v.val = xchg(&sk->sk_err_soft, 0); in sock_getsockopt()
1160 v.val = sock_flag(sk, SOCK_URGINLINE); in sock_getsockopt()
1164 v.val = sk->sk_no_check_tx; in sock_getsockopt()
1168 v.val = sk->sk_priority; in sock_getsockopt()
1173 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER); in sock_getsockopt()
1174 v.ling.l_linger = sk->sk_lingertime / HZ; in sock_getsockopt()
1182 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && in sock_getsockopt()
1183 !sock_flag(sk, SOCK_RCVTSTAMPNS); in sock_getsockopt()
1187 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS); in sock_getsockopt()
1191 v.val = sk->sk_tsflags; in sock_getsockopt()
1196 if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) { in sock_getsockopt()
1200 v.tm.tv_sec = sk->sk_rcvtimeo / HZ; in sock_getsockopt()
1201 v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * USEC_PER_SEC) / HZ; in sock_getsockopt()
1207 if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) { in sock_getsockopt()
1211 v.tm.tv_sec = sk->sk_sndtimeo / HZ; in sock_getsockopt()
1212 v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * USEC_PER_SEC) / HZ; in sock_getsockopt()
1217 v.val = sk->sk_rcvlowat; in sock_getsockopt()
1233 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); in sock_getsockopt()
1243 if (!sk->sk_peer_cred) in sock_getsockopt()
1246 n = sk->sk_peer_cred->group_info->ngroups; in sock_getsockopt()
1254 sk->sk_peer_cred->group_info); in sock_getsockopt()
1278 v.val = sk->sk_state == TCP_LISTEN; in sock_getsockopt()
1289 v.val = sk->sk_mark; in sock_getsockopt()
1293 v.val = sock_flag(sk, SOCK_RXQ_OVFL); in sock_getsockopt()
1297 v.val = sock_flag(sk, SOCK_WIFI_STATUS); in sock_getsockopt()
1304 v.val = sk->sk_peek_off; in sock_getsockopt()
1307 v.val = sock_flag(sk, SOCK_NOFCS); in sock_getsockopt()
1311 return sock_getbindtodevice(sk, optval, optlen, len); in sock_getsockopt()
1314 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len); in sock_getsockopt()
1321 v.val = sock_flag(sk, SOCK_FILTER_LOCKED); in sock_getsockopt()
1329 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE); in sock_getsockopt()
1334 v.val = sk->sk_ll_usec; in sock_getsockopt()
1339 v.val = sk->sk_max_pacing_rate; in sock_getsockopt()
1343 v.val = sk->sk_incoming_cpu; in sock_getsockopt()
1353 sk_get_meminfo(sk, meminfo); in sock_getsockopt()
1364 v.val = READ_ONCE(sk->sk_napi_id); in sock_getsockopt()
1377 v.val64 = sock_gen_cookie(sk); in sock_getsockopt()
1381 v.val = sock_flag(sk, SOCK_ZEROCOPY); in sock_getsockopt()
1386 v.txtime.clockid = sk->sk_clockid; in sock_getsockopt()
1387 v.txtime.flags |= sk->sk_txtime_deadline_mode ? in sock_getsockopt()
1389 v.txtime.flags |= sk->sk_txtime_report_errors ? in sock_getsockopt()
1415 static inline void sock_lock_init(struct sock *sk) in sock_lock_init() argument
1417 if (sk->sk_kern_sock) in sock_lock_init()
1419 sk, in sock_lock_init()
1420 af_family_kern_slock_key_strings[sk->sk_family], in sock_lock_init()
1421 af_family_kern_slock_keys + sk->sk_family, in sock_lock_init()
1422 af_family_kern_key_strings[sk->sk_family], in sock_lock_init()
1423 af_family_kern_keys + sk->sk_family); in sock_lock_init()
1426 sk, in sock_lock_init()
1427 af_family_slock_key_strings[sk->sk_family], in sock_lock_init()
1428 af_family_slock_keys + sk->sk_family, in sock_lock_init()
1429 af_family_key_strings[sk->sk_family], in sock_lock_init()
1430 af_family_keys + sk->sk_family); in sock_lock_init()
1457 struct sock *sk; in sk_prot_alloc() local
1462 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); in sk_prot_alloc()
1463 if (!sk) in sk_prot_alloc()
1464 return sk; in sk_prot_alloc()
1466 sk_prot_clear_nulls(sk, prot->obj_size); in sk_prot_alloc()
1468 sk = kmalloc(prot->obj_size, priority); in sk_prot_alloc()
1470 if (sk != NULL) { in sk_prot_alloc()
1471 if (security_sk_alloc(sk, family, priority)) in sk_prot_alloc()
1476 sk_tx_queue_clear(sk); in sk_prot_alloc()
1479 return sk; in sk_prot_alloc()
1482 security_sk_free(sk); in sk_prot_alloc()
1485 kmem_cache_free(slab, sk); in sk_prot_alloc()
1487 kfree(sk); in sk_prot_alloc()
1491 static void sk_prot_free(struct proto *prot, struct sock *sk) in sk_prot_free() argument
1499 cgroup_sk_free(&sk->sk_cgrp_data); in sk_prot_free()
1500 mem_cgroup_sk_free(sk); in sk_prot_free()
1501 security_sk_free(sk); in sk_prot_free()
1503 kmem_cache_free(slab, sk); in sk_prot_free()
1505 kfree(sk); in sk_prot_free()
1520 struct sock *sk; in sk_alloc() local
1522 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); in sk_alloc()
1523 if (sk) { in sk_alloc()
1524 sk->sk_family = family; in sk_alloc()
1529 sk->sk_prot = sk->sk_prot_creator = prot; in sk_alloc()
1530 sk->sk_kern_sock = kern; in sk_alloc()
1531 sock_lock_init(sk); in sk_alloc()
1532 sk->sk_net_refcnt = kern ? 0 : 1; in sk_alloc()
1533 if (likely(sk->sk_net_refcnt)) { in sk_alloc()
1538 sock_net_set(sk, net); in sk_alloc()
1539 refcount_set(&sk->sk_wmem_alloc, 1); in sk_alloc()
1541 mem_cgroup_sk_alloc(sk); in sk_alloc()
1542 cgroup_sk_alloc(&sk->sk_cgrp_data); in sk_alloc()
1543 sock_update_classid(&sk->sk_cgrp_data); in sk_alloc()
1544 sock_update_netprioidx(&sk->sk_cgrp_data); in sk_alloc()
1547 return sk; in sk_alloc()
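Note: sk_alloc() forces __GFP_ZERO, sets up the family, lock class and cgroup state, and takes a netns reference for user (non-kernel) sockets. A loose sketch of a family create() hook built on it; example_proto and example_create are invented, AF_UNSPEC is a stand-in family, and a real hook would also set sock->ops and protocol state:

    static struct proto example_proto = {
            .name     = "EXAMPLE",
            .owner    = THIS_MODULE,
            .obj_size = sizeof(struct sock),  /* protocols embed struct sock */
    };

    static int example_create(struct net *net, struct socket *sock,
                              int protocol, int kern)
    {
            struct sock *sk;

            sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &example_proto, kern);
            if (!sk)
                    return -ENOBUFS;
            sock_init_data(sock, sk);         /* defaults, lines 2751 ff. */
            return 0;
    }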
1556 struct sock *sk = container_of(head, struct sock, sk_rcu); in __sk_destruct() local
1559 if (sk->sk_destruct) in __sk_destruct()
1560 sk->sk_destruct(sk); in __sk_destruct()
1562 filter = rcu_dereference_check(sk->sk_filter, in __sk_destruct()
1563 refcount_read(&sk->sk_wmem_alloc) == 0); in __sk_destruct()
1565 sk_filter_uncharge(sk, filter); in __sk_destruct()
1566 RCU_INIT_POINTER(sk->sk_filter, NULL); in __sk_destruct()
1568 if (rcu_access_pointer(sk->sk_reuseport_cb)) in __sk_destruct()
1569 reuseport_detach_sock(sk); in __sk_destruct()
1571 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); in __sk_destruct()
1573 if (atomic_read(&sk->sk_omem_alloc)) in __sk_destruct()
1575 __func__, atomic_read(&sk->sk_omem_alloc)); in __sk_destruct()
1577 if (sk->sk_frag.page) { in __sk_destruct()
1578 put_page(sk->sk_frag.page); in __sk_destruct()
1579 sk->sk_frag.page = NULL; in __sk_destruct()
1582 if (sk->sk_peer_cred) in __sk_destruct()
1583 put_cred(sk->sk_peer_cred); in __sk_destruct()
1584 put_pid(sk->sk_peer_pid); in __sk_destruct()
1585 if (likely(sk->sk_net_refcnt)) in __sk_destruct()
1586 put_net(sock_net(sk)); in __sk_destruct()
1587 sk_prot_free(sk->sk_prot_creator, sk); in __sk_destruct()
1590 void sk_destruct(struct sock *sk) in sk_destruct() argument
1592 if (sock_flag(sk, SOCK_RCU_FREE)) in sk_destruct()
1593 call_rcu(&sk->sk_rcu, __sk_destruct); in sk_destruct()
1595 __sk_destruct(&sk->sk_rcu); in sk_destruct()
1598 static void __sk_free(struct sock *sk) in __sk_free() argument
1600 if (likely(sk->sk_net_refcnt)) in __sk_free()
1601 sock_inuse_add(sock_net(sk), -1); in __sk_free()
1603 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk))) in __sk_free()
1604 sock_diag_broadcast_destroy(sk); in __sk_free()
1606 sk_destruct(sk); in __sk_free()
1609 void sk_free(struct sock *sk) in sk_free() argument
1616 if (refcount_dec_and_test(&sk->sk_wmem_alloc)) in sk_free()
1617 __sk_free(sk); in sk_free()
1621 static void sk_init_common(struct sock *sk) in sk_init_common() argument
1623 skb_queue_head_init(&sk->sk_receive_queue); in sk_init_common()
1624 skb_queue_head_init(&sk->sk_write_queue); in sk_init_common()
1625 skb_queue_head_init(&sk->sk_error_queue); in sk_init_common()
1627 rwlock_init(&sk->sk_callback_lock); in sk_init_common()
1628 lockdep_set_class_and_name(&sk->sk_receive_queue.lock, in sk_init_common()
1629 af_rlock_keys + sk->sk_family, in sk_init_common()
1630 af_family_rlock_key_strings[sk->sk_family]); in sk_init_common()
1631 lockdep_set_class_and_name(&sk->sk_write_queue.lock, in sk_init_common()
1632 af_wlock_keys + sk->sk_family, in sk_init_common()
1633 af_family_wlock_key_strings[sk->sk_family]); in sk_init_common()
1634 lockdep_set_class_and_name(&sk->sk_error_queue.lock, in sk_init_common()
1635 af_elock_keys + sk->sk_family, in sk_init_common()
1636 af_family_elock_key_strings[sk->sk_family]); in sk_init_common()
1637 lockdep_set_class_and_name(&sk->sk_callback_lock, in sk_init_common()
1638 af_callback_keys + sk->sk_family, in sk_init_common()
1639 af_family_clock_key_strings[sk->sk_family]); in sk_init_common()
1649 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) in sk_clone_lock() argument
1654 newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family); in sk_clone_lock()
1658 sock_copy(newsk, sk); in sk_clone_lock()
1660 newsk->sk_prot_creator = sk->sk_prot; in sk_clone_lock()
1685 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; in sk_clone_lock()
1693 filter = rcu_dereference(sk->sk_filter); in sk_clone_lock()
1703 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { in sk_clone_lock()
1749 if (sock_needs_netstamp(sk) && in sk_clone_lock()
1758 void sk_free_unlock_clone(struct sock *sk) in sk_free_unlock_clone() argument
1762 sk->sk_destruct = NULL; in sk_free_unlock_clone()
1763 bh_unlock_sock(sk); in sk_free_unlock_clone()
1764 sk_free(sk); in sk_free_unlock_clone()
1768 void sk_setup_caps(struct sock *sk, struct dst_entry *dst) in sk_setup_caps() argument
1772 sk_dst_set(sk, dst); in sk_setup_caps()
1773 sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps; in sk_setup_caps()
1774 if (sk->sk_route_caps & NETIF_F_GSO) in sk_setup_caps()
1775 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; in sk_setup_caps()
1776 sk->sk_route_caps &= ~sk->sk_route_nocaps; in sk_setup_caps()
1777 if (sk_can_gso(sk)) { in sk_setup_caps()
1779 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; in sk_setup_caps()
1781 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; in sk_setup_caps()
1782 sk->sk_gso_max_size = dst->dev->gso_max_size; in sk_setup_caps()
1786 sk->sk_gso_max_segs = max_segs; in sk_setup_caps()
1800 struct sock *sk = skb->sk; in sock_wfree() local
1803 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { in sock_wfree()
1808 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc)); in sock_wfree()
1809 sk->sk_write_space(sk); in sock_wfree()
1816 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc)) in sock_wfree()
1817 __sk_free(sk); in sock_wfree()
1826 struct sock *sk = skb->sk; in __sock_wfree() local
1828 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)) in __sock_wfree()
1829 __sk_free(sk); in __sock_wfree()
1832 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) in skb_set_owner_w() argument
1835 skb->sk = sk; in skb_set_owner_w()
1837 if (unlikely(!sk_fullsock(sk))) { in skb_set_owner_w()
1839 sock_hold(sk); in skb_set_owner_w()
1844 skb_set_hash_from_sk(skb, sk); in skb_set_owner_w()
1850 refcount_add(skb->truesize, &sk->sk_wmem_alloc); in skb_set_owner_w()
1870 struct sock *sk = skb->sk; in skb_orphan_partial() local
1872 if (refcount_inc_not_zero(&sk->sk_refcnt)) { in skb_orphan_partial()
1873 WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)); in skb_orphan_partial()
1887 struct sock *sk = skb->sk; in sock_rfree() local
1890 atomic_sub(len, &sk->sk_rmem_alloc); in sock_rfree()
1891 sk_mem_uncharge(sk, len); in sock_rfree()
1901 sock_put(skb->sk); in sock_efree()
1905 kuid_t sock_i_uid(struct sock *sk) in sock_i_uid() argument
1909 read_lock_bh(&sk->sk_callback_lock); in sock_i_uid()
1910 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID; in sock_i_uid()
1911 read_unlock_bh(&sk->sk_callback_lock); in sock_i_uid()
1916 unsigned long sock_i_ino(struct sock *sk) in sock_i_ino() argument
1920 read_lock_bh(&sk->sk_callback_lock); in sock_i_ino()
1921 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; in sock_i_ino()
1922 read_unlock_bh(&sk->sk_callback_lock); in sock_i_ino()
1930 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, in sock_wmalloc() argument
1933 if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { in sock_wmalloc()
1936 skb_set_owner_w(skb, sk); in sock_wmalloc()
1946 struct sock *sk = skb->sk; in sock_ofree() local
1948 atomic_sub(skb->truesize, &sk->sk_omem_alloc); in sock_ofree()
1951 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size, in sock_omalloc() argument
1957 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) > in sock_omalloc()
1965 atomic_add(skb->truesize, &sk->sk_omem_alloc); in sock_omalloc()
1966 skb->sk = sk; in sock_omalloc()
1974 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) in sock_kmalloc() argument
1977 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { in sock_kmalloc()
1982 atomic_add(size, &sk->sk_omem_alloc); in sock_kmalloc()
1986 atomic_sub(size, &sk->sk_omem_alloc); in sock_kmalloc()
1996 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size, in __sock_kfree_s() argument
2005 atomic_sub(size, &sk->sk_omem_alloc); in __sock_kfree_s()
2008 void sock_kfree_s(struct sock *sk, void *mem, int size) in sock_kfree_s() argument
2010 __sock_kfree_s(sk, mem, size, false); in sock_kfree_s()
2014 void sock_kzfree_s(struct sock *sk, void *mem, int size) in sock_kzfree_s() argument
2016 __sock_kfree_s(sk, mem, size, true); in sock_kzfree_s()
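Note: sock_kmalloc() charges option memory to sk_omem_alloc and refuses to grow past sysctl_optmem_max; frees must pass the same size back so the charge is undone. A sketch (example_attach_opt is invented):

    static int example_attach_opt(struct sock *sk, int len)
    {
            void *opt = sock_kmalloc(sk, len, GFP_KERNEL);

            if (!opt)
                    return -ENOBUFS;
            /* ... populate and use opt ... */
            sock_kfree_s(sk, opt, len);       /* or sock_kzfree_s for secrets */
            return 0;
    }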
2023 static long sock_wait_for_wmem(struct sock *sk, long timeo) in sock_wait_for_wmem() argument
2027 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); in sock_wait_for_wmem()
2033 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in sock_wait_for_wmem()
2034 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); in sock_wait_for_wmem()
2035 if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) in sock_wait_for_wmem()
2037 if (sk->sk_shutdown & SEND_SHUTDOWN) in sock_wait_for_wmem()
2039 if (sk->sk_err) in sock_wait_for_wmem()
2043 finish_wait(sk_sleep(sk), &wait); in sock_wait_for_wmem()
2052 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, in sock_alloc_send_pskb() argument
2060 timeo = sock_sndtimeo(sk, noblock); in sock_alloc_send_pskb()
2062 err = sock_error(sk); in sock_alloc_send_pskb()
2067 if (sk->sk_shutdown & SEND_SHUTDOWN) in sock_alloc_send_pskb()
2070 if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf) in sock_alloc_send_pskb()
2073 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); in sock_alloc_send_pskb()
2074 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in sock_alloc_send_pskb()
2080 timeo = sock_wait_for_wmem(sk, timeo); in sock_alloc_send_pskb()
2083 errcode, sk->sk_allocation); in sock_alloc_send_pskb()
2085 skb_set_owner_w(skb, sk); in sock_alloc_send_pskb()
2096 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, in sock_alloc_send_skb() argument
2099 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0); in sock_alloc_send_skb()
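Note: sock_alloc_send_pskb() sleeps up to the send timeout for wmem space, honouring shutdown and pending socket errors. A condensed, hypothetical sendmsg() path using the non-paged wrapper:

    static int example_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
    {
            int err;
            struct sk_buff *skb;

            skb = sock_alloc_send_skb(sk, len + MAX_HEADER,
                                      msg->msg_flags & MSG_DONTWAIT, &err);
            if (!skb)
                    return err;     /* -EAGAIN, -EPIPE, or a pending sk_err */
            skb_reserve(skb, MAX_HEADER);
            err = memcpy_from_msg(skb_put(skb, len), msg, len);
            if (err) {
                    kfree_skb(skb);
                    return err;
            }
            /* hand the skb to the transmit path here */
            return len;
    }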
2103 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, in __sock_cmsg_send() argument
2110 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) in __sock_cmsg_send()
2128 if (!sock_flag(sk, SOCK_TXTIME)) in __sock_cmsg_send()
2145 int sock_cmsg_send(struct sock *sk, struct msghdr *msg, in sock_cmsg_send() argument
2156 ret = __sock_cmsg_send(sk, msg, cmsg, sockc); in sock_cmsg_send()
2164 static void sk_enter_memory_pressure(struct sock *sk) in sk_enter_memory_pressure() argument
2166 if (!sk->sk_prot->enter_memory_pressure) in sk_enter_memory_pressure()
2169 sk->sk_prot->enter_memory_pressure(sk); in sk_enter_memory_pressure()
2172 static void sk_leave_memory_pressure(struct sock *sk) in sk_leave_memory_pressure() argument
2174 if (sk->sk_prot->leave_memory_pressure) { in sk_leave_memory_pressure()
2175 sk->sk_prot->leave_memory_pressure(sk); in sk_leave_memory_pressure()
2177 unsigned long *memory_pressure = sk->sk_prot->memory_pressure; in sk_leave_memory_pressure()
2230 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) in sk_page_frag_refill() argument
2232 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation))) in sk_page_frag_refill()
2235 sk_enter_memory_pressure(sk); in sk_page_frag_refill()
2236 sk_stream_moderate_sndbuf(sk); in sk_page_frag_refill()
2241 int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg, in sk_alloc_sg() argument
2251 pfrag = sk_page_frag(sk); in sk_alloc_sg()
2256 if (!sk_page_frag_refill(sk, pfrag)) { in sk_alloc_sg()
2263 if (!sk_wmem_schedule(sk, use)) { in sk_alloc_sg()
2268 sk_mem_charge(sk, use); in sk_alloc_sg()
2302 static void __lock_sock(struct sock *sk) in __lock_sock() argument
2303 __releases(&sk->sk_lock.slock) in __lock_sock()
2304 __acquires(&sk->sk_lock.slock) in __lock_sock()
2309 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, in __lock_sock()
2311 spin_unlock_bh(&sk->sk_lock.slock); in __lock_sock()
2313 spin_lock_bh(&sk->sk_lock.slock); in __lock_sock()
2314 if (!sock_owned_by_user(sk)) in __lock_sock()
2317 finish_wait(&sk->sk_lock.wq, &wait); in __lock_sock()
2320 static void __release_sock(struct sock *sk) in __release_sock() argument
2321 __releases(&sk->sk_lock.slock) in __release_sock()
2322 __acquires(&sk->sk_lock.slock) in __release_sock()
2326 while ((skb = sk->sk_backlog.head) != NULL) { in __release_sock()
2327 sk->sk_backlog.head = sk->sk_backlog.tail = NULL; in __release_sock()
2329 spin_unlock_bh(&sk->sk_lock.slock); in __release_sock()
2336 sk_backlog_rcv(sk, skb); in __release_sock()
2343 spin_lock_bh(&sk->sk_lock.slock); in __release_sock()
2350 sk->sk_backlog.len = 0; in __release_sock()
2353 void __sk_flush_backlog(struct sock *sk) in __sk_flush_backlog() argument
2355 spin_lock_bh(&sk->sk_lock.slock); in __sk_flush_backlog()
2356 __release_sock(sk); in __sk_flush_backlog()
2357 spin_unlock_bh(&sk->sk_lock.slock); in __sk_flush_backlog()
2371 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb) in sk_wait_data() argument
2376 add_wait_queue(sk_sleep(sk), &wait); in sk_wait_data()
2377 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); in sk_wait_data()
2378 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait); in sk_wait_data()
2379 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); in sk_wait_data()
2380 remove_wait_queue(sk_sleep(sk), &wait); in sk_wait_data()
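Note: sk_wait_data() is called with the socket lock held; sk_wait_event() drops and retakes the lock around the sleep, and the wait ends when the tail of sk_receive_queue no longer matches the skb snapshot. The classic recvmsg() loop it supports, sketched (example_wait_for_data is invented):

    static struct sk_buff *example_wait_for_data(struct sock *sk, int noblock)
    {
            long timeo = sock_rcvtimeo(sk, noblock);
            struct sk_buff *skb;

            while (!(skb = skb_peek(&sk->sk_receive_queue))) {
                    if (!timeo || signal_pending(current))
                            return NULL;    /* caller maps to -EAGAIN/-EINTR */
                    sk_wait_data(sk, &timeo, NULL);
            }
            return skb;
    }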
2394 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) in __sk_mem_raise_allocated() argument
2396 struct proto *prot = sk->sk_prot; in __sk_mem_raise_allocated()
2397 long allocated = sk_memory_allocated_add(sk, amt); in __sk_mem_raise_allocated()
2400 if (mem_cgroup_sockets_enabled && sk->sk_memcg && in __sk_mem_raise_allocated()
2401 !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt))) in __sk_mem_raise_allocated()
2405 if (allocated <= sk_prot_mem_limits(sk, 0)) { in __sk_mem_raise_allocated()
2406 sk_leave_memory_pressure(sk); in __sk_mem_raise_allocated()
2411 if (allocated > sk_prot_mem_limits(sk, 1)) in __sk_mem_raise_allocated()
2412 sk_enter_memory_pressure(sk); in __sk_mem_raise_allocated()
2415 if (allocated > sk_prot_mem_limits(sk, 2)) in __sk_mem_raise_allocated()
2420 if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot)) in __sk_mem_raise_allocated()
2424 int wmem0 = sk_get_wmem0(sk, prot); in __sk_mem_raise_allocated()
2426 if (sk->sk_type == SOCK_STREAM) { in __sk_mem_raise_allocated()
2427 if (sk->sk_wmem_queued < wmem0) in __sk_mem_raise_allocated()
2429 } else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) { in __sk_mem_raise_allocated()
2434 if (sk_has_memory_pressure(sk)) { in __sk_mem_raise_allocated()
2437 if (!sk_under_memory_pressure(sk)) in __sk_mem_raise_allocated()
2439 alloc = sk_sockets_allocated_read_positive(sk); in __sk_mem_raise_allocated()
2440 if (sk_prot_mem_limits(sk, 2) > alloc * in __sk_mem_raise_allocated()
2441 sk_mem_pages(sk->sk_wmem_queued + in __sk_mem_raise_allocated()
2442 atomic_read(&sk->sk_rmem_alloc) + in __sk_mem_raise_allocated()
2443 sk->sk_forward_alloc)) in __sk_mem_raise_allocated()
2449 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { in __sk_mem_raise_allocated()
2450 sk_stream_moderate_sndbuf(sk); in __sk_mem_raise_allocated()
2455 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) in __sk_mem_raise_allocated()
2460 trace_sock_exceed_buf_limit(sk, prot, allocated, kind); in __sk_mem_raise_allocated()
2462 sk_memory_allocated_sub(sk, amt); in __sk_mem_raise_allocated()
2464 if (mem_cgroup_sockets_enabled && sk->sk_memcg) in __sk_mem_raise_allocated()
2465 mem_cgroup_uncharge_skmem(sk->sk_memcg, amt); in __sk_mem_raise_allocated()
2481 int __sk_mem_schedule(struct sock *sk, int size, int kind) in __sk_mem_schedule() argument
2485 sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT; in __sk_mem_schedule()
2486 ret = __sk_mem_raise_allocated(sk, size, amt, kind); in __sk_mem_schedule()
2488 sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT; in __sk_mem_schedule()
2500 void __sk_mem_reduce_allocated(struct sock *sk, int amount) in __sk_mem_reduce_allocated() argument
2502 sk_memory_allocated_sub(sk, amount); in __sk_mem_reduce_allocated()
2504 if (mem_cgroup_sockets_enabled && sk->sk_memcg) in __sk_mem_reduce_allocated()
2505 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount); in __sk_mem_reduce_allocated()
2507 if (sk_under_memory_pressure(sk) && in __sk_mem_reduce_allocated()
2508 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) in __sk_mem_reduce_allocated()
2509 sk_leave_memory_pressure(sk); in __sk_mem_reduce_allocated()
2518 void __sk_mem_reclaim(struct sock *sk, int amount) in __sk_mem_reclaim() argument
2521 sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT; in __sk_mem_reclaim()
2522 __sk_mem_reduce_allocated(sk, amount); in __sk_mem_reclaim()
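Note: forward allocation moves in SK_MEM_QUANTUM chunks — __sk_mem_schedule() tops up sk_forward_alloc and __sk_mem_reclaim() returns the surplus. Protocols normally go through the inline wrappers, roughly as in this sketch (example_charge is invented):

    static bool example_charge(struct sock *sk, struct sk_buff *skb)
    {
            if (!sk_wmem_schedule(sk, skb->truesize))
                    return false;   /* over the protocol's memory limits */
            sk_mem_charge(sk, skb->truesize);       /* consume forward alloc */
            return true;
    }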
2526 int sk_set_peek_off(struct sock *sk, int val) in sk_set_peek_off() argument
2528 sk->sk_peek_off = val; in sk_set_peek_off()
2611 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len) in sock_no_sendmsg_locked() argument
2645 ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page, in sock_no_sendpage_locked() argument
2655 res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size); in sock_no_sendpage_locked()
2665 static void sock_def_wakeup(struct sock *sk) in sock_def_wakeup() argument
2670 wq = rcu_dereference(sk->sk_wq); in sock_def_wakeup()
2676 static void sock_def_error_report(struct sock *sk) in sock_def_error_report() argument
2681 wq = rcu_dereference(sk->sk_wq); in sock_def_error_report()
2684 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); in sock_def_error_report()
2688 static void sock_def_readable(struct sock *sk) in sock_def_readable() argument
2693 wq = rcu_dereference(sk->sk_wq); in sock_def_readable()
2697 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); in sock_def_readable()
2701 static void sock_def_write_space(struct sock *sk) in sock_def_write_space() argument
2710 if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { in sock_def_write_space()
2711 wq = rcu_dereference(sk->sk_wq); in sock_def_write_space()
2717 if (sock_writeable(sk)) in sock_def_write_space()
2718 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); in sock_def_write_space()
2724 static void sock_def_destruct(struct sock *sk) in sock_def_destruct() argument
2728 void sk_send_sigurg(struct sock *sk) in sk_send_sigurg() argument
2730 if (sk->sk_socket && sk->sk_socket->file) in sk_send_sigurg()
2731 if (send_sigurg(&sk->sk_socket->file->f_owner)) in sk_send_sigurg()
2732 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); in sk_send_sigurg()
2736 void sk_reset_timer(struct sock *sk, struct timer_list* timer, in sk_reset_timer() argument
2740 sock_hold(sk); in sk_reset_timer()
2744 void sk_stop_timer(struct sock *sk, struct timer_list* timer) in sk_stop_timer() argument
2747 __sock_put(sk); in sk_stop_timer()
2751 void sock_init_data(struct socket *sock, struct sock *sk) in sock_init_data() argument
2753 sk_init_common(sk); in sock_init_data()
2754 sk->sk_send_head = NULL; in sock_init_data()
2756 timer_setup(&sk->sk_timer, NULL, 0); in sock_init_data()
2758 sk->sk_allocation = GFP_KERNEL; in sock_init_data()
2759 sk->sk_rcvbuf = sysctl_rmem_default; in sock_init_data()
2760 sk->sk_sndbuf = sysctl_wmem_default; in sock_init_data()
2761 sk->sk_state = TCP_CLOSE; in sock_init_data()
2762 sk_set_socket(sk, sock); in sock_init_data()
2764 sock_set_flag(sk, SOCK_ZAPPED); in sock_init_data()
2767 sk->sk_type = sock->type; in sock_init_data()
2768 sk->sk_wq = sock->wq; in sock_init_data()
2769 sock->sk = sk; in sock_init_data()
2770 sk->sk_uid = SOCK_INODE(sock)->i_uid; in sock_init_data()
2772 sk->sk_wq = NULL; in sock_init_data()
2773 sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0); in sock_init_data()
2776 rwlock_init(&sk->sk_callback_lock); in sock_init_data()
2777 if (sk->sk_kern_sock) in sock_init_data()
2779 &sk->sk_callback_lock, in sock_init_data()
2780 af_kern_callback_keys + sk->sk_family, in sock_init_data()
2781 af_family_kern_clock_key_strings[sk->sk_family]); in sock_init_data()
2784 &sk->sk_callback_lock, in sock_init_data()
2785 af_callback_keys + sk->sk_family, in sock_init_data()
2786 af_family_clock_key_strings[sk->sk_family]); in sock_init_data()
2788 sk->sk_state_change = sock_def_wakeup; in sock_init_data()
2789 sk->sk_data_ready = sock_def_readable; in sock_init_data()
2790 sk->sk_write_space = sock_def_write_space; in sock_init_data()
2791 sk->sk_error_report = sock_def_error_report; in sock_init_data()
2792 sk->sk_destruct = sock_def_destruct; in sock_init_data()
2794 sk->sk_frag.page = NULL; in sock_init_data()
2795 sk->sk_frag.offset = 0; in sock_init_data()
2796 sk->sk_peek_off = -1; in sock_init_data()
2798 sk->sk_peer_pid = NULL; in sock_init_data()
2799 sk->sk_peer_cred = NULL; in sock_init_data()
2800 sk->sk_write_pending = 0; in sock_init_data()
2801 sk->sk_rcvlowat = 1; in sock_init_data()
2802 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; in sock_init_data()
2803 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; in sock_init_data()
2805 sk->sk_stamp = SK_DEFAULT_STAMP; in sock_init_data()
2806 atomic_set(&sk->sk_zckey, 0); in sock_init_data()
2809 sk->sk_napi_id = 0; in sock_init_data()
2810 sk->sk_ll_usec = sysctl_net_busy_read; in sock_init_data()
2813 sk->sk_max_pacing_rate = ~0U; in sock_init_data()
2814 sk->sk_pacing_rate = ~0U; in sock_init_data()
2815 sk->sk_pacing_shift = 10; in sock_init_data()
2816 sk->sk_incoming_cpu = -1; in sock_init_data()
2818 sk_rx_queue_clear(sk); in sock_init_data()
2824 refcount_set(&sk->sk_refcnt, 1); in sock_init_data()
2825 atomic_set(&sk->sk_drops, 0); in sock_init_data()
2829 void lock_sock_nested(struct sock *sk, int subclass) in lock_sock_nested() argument
2832 spin_lock_bh(&sk->sk_lock.slock); in lock_sock_nested()
2833 if (sk->sk_lock.owned) in lock_sock_nested()
2834 __lock_sock(sk); in lock_sock_nested()
2835 sk->sk_lock.owned = 1; in lock_sock_nested()
2836 spin_unlock(&sk->sk_lock.slock); in lock_sock_nested()
2840 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); in lock_sock_nested()
2845 void release_sock(struct sock *sk) in release_sock() argument
2847 spin_lock_bh(&sk->sk_lock.slock); in release_sock()
2848 if (sk->sk_backlog.tail) in release_sock()
2849 __release_sock(sk); in release_sock()
2854 if (sk->sk_prot->release_cb) in release_sock()
2855 sk->sk_prot->release_cb(sk); in release_sock()
2857 sock_release_ownership(sk); in release_sock()
2858 if (waitqueue_active(&sk->sk_lock.wq)) in release_sock()
2859 wake_up(&sk->sk_lock.wq); in release_sock()
2860 spin_unlock_bh(&sk->sk_lock.slock); in release_sock()
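Note: lock_sock() marks the socket owned so softirq input defers to the backlog; release_sock() replays that backlog (line 2848) before waking lock waiters. The canonical pairing, mirroring the SO_RCVLOWAT assignment at line 872:

    static void example_locked_update(struct sock *sk, int val)
    {
            lock_sock(sk);
            sk->sk_rcvlowat = val ? : 1;
            release_sock(sk);
    }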
2877 bool lock_sock_fast(struct sock *sk) in lock_sock_fast() argument
2880 spin_lock_bh(&sk->sk_lock.slock); in lock_sock_fast()
2882 if (!sk->sk_lock.owned) in lock_sock_fast()
2888 __lock_sock(sk); in lock_sock_fast()
2889 sk->sk_lock.owned = 1; in lock_sock_fast()
2890 spin_unlock(&sk->sk_lock.slock); in lock_sock_fast()
2894 mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); in lock_sock_fast()
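Note: lock_sock_fast() keeps only the spinlock (BHs disabled) when the socket is uncontended, and returns true only if it had to fall back to the full lock; that flag must be fed back to unlock_sock_fast(). Sketch:

    static void example_fast_section(struct sock *sk)
    {
            bool slow = lock_sock_fast(sk);

            /* short, non-sleeping critical section */
            unlock_sock_fast(sk, slow);
    }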
2900 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) in sock_get_timestamp() argument
2904 sock_enable_timestamp(sk, SOCK_TIMESTAMP); in sock_get_timestamp()
2905 tv = ktime_to_timeval(sk->sk_stamp); in sock_get_timestamp()
2909 sk->sk_stamp = ktime_get_real(); in sock_get_timestamp()
2910 tv = ktime_to_timeval(sk->sk_stamp); in sock_get_timestamp()
2916 int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) in sock_get_timestampns() argument
2920 sock_enable_timestamp(sk, SOCK_TIMESTAMP); in sock_get_timestampns()
2921 ts = ktime_to_timespec(sk->sk_stamp); in sock_get_timestampns()
2925 sk->sk_stamp = ktime_get_real(); in sock_get_timestampns()
2926 ts = ktime_to_timespec(sk->sk_stamp); in sock_get_timestampns()
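Note: sock_get_timestamp() backs the SIOCGSTAMP ioctl, reporting when the last packet arrived on the socket (and enabling timestamping on first use). Its userspace counterpart, with print_last_rx_time an invented helper:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/time.h>
    #include <linux/sockios.h>      /* SIOCGSTAMP */

    static void print_last_rx_time(int fd)
    {
            struct timeval tv;

            if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
                    printf("last rx: %ld.%06ld\n",
                           (long)tv.tv_sec, (long)tv.tv_usec);
    }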
2932 void sock_enable_timestamp(struct sock *sk, int flag) in sock_enable_timestamp() argument
2934 if (!sock_flag(sk, flag)) { in sock_enable_timestamp()
2935 unsigned long previous_flags = sk->sk_flags; in sock_enable_timestamp()
2937 sock_set_flag(sk, flag); in sock_enable_timestamp()
2943 if (sock_needs_netstamp(sk) && in sock_enable_timestamp()
2949 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, in sock_recv_errqueue() argument
2957 skb = sock_dequeue_err_skb(sk); in sock_recv_errqueue()
2970 sock_recv_timestamp(msg, sk, skb); in sock_recv_errqueue()
2995 struct sock *sk = sock->sk; in sock_common_getsockopt() local
2997 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); in sock_common_getsockopt()
3005 struct sock *sk = sock->sk; in compat_sock_common_getsockopt() local
3007 if (sk->sk_prot->compat_getsockopt != NULL) in compat_sock_common_getsockopt()
3008 return sk->sk_prot->compat_getsockopt(sk, level, optname, in compat_sock_common_getsockopt()
3010 return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); in compat_sock_common_getsockopt()
3018 struct sock *sk = sock->sk; in sock_common_recvmsg() local
3022 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT, in sock_common_recvmsg()
3036 struct sock *sk = sock->sk; in sock_common_setsockopt() local
3038 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); in sock_common_setsockopt()
3046 struct sock *sk = sock->sk; in compat_sock_common_setsockopt() local
3048 if (sk->sk_prot->compat_setsockopt != NULL) in compat_sock_common_setsockopt()
3049 return sk->sk_prot->compat_setsockopt(sk, level, optname, in compat_sock_common_setsockopt()
3051 return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); in compat_sock_common_setsockopt()
3056 void sk_common_release(struct sock *sk) in sk_common_release() argument
3058 if (sk->sk_prot->destroy) in sk_common_release()
3059 sk->sk_prot->destroy(sk); in sk_common_release()
3069 sk->sk_prot->unhash(sk); in sk_common_release()
3083 sock_orphan(sk); in sk_common_release()
3085 xfrm_sk_free_policy(sk); in sk_common_release()
3087 sk_refcnt_debug_release(sk); in sk_common_release()
3089 sock_put(sk); in sk_common_release()
3093 void sk_get_meminfo(const struct sock *sk, u32 *mem) in sk_get_meminfo() argument
3097 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); in sk_get_meminfo()
3098 mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf; in sk_get_meminfo()
3099 mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk); in sk_get_meminfo()
3100 mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf; in sk_get_meminfo()
3101 mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc; in sk_get_meminfo()
3102 mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued; in sk_get_meminfo()
3103 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); in sk_get_meminfo()
3104 mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len; in sk_get_meminfo()
3105 mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops); in sk_get_meminfo()
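Note: the counters sk_get_meminfo() fills are exported to userspace via getsockopt(SO_MEMINFO), handled at line 1353. A sketch assuming a kernel and headers (>= 4.12) that define SO_MEMINFO and SK_MEMINFO_VARS:

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/socket.h>
    #include <linux/sock_diag.h>    /* SK_MEMINFO_* indices */

    static void print_sock_meminfo(int fd)
    {
            uint32_t mem[SK_MEMINFO_VARS];
            socklen_t len = sizeof(mem);

            if (getsockopt(fd, SOL_SOCKET, SO_MEMINFO, mem, &len) == 0)
                    printf("rmem=%u rcvbuf=%u wmem=%u sndbuf=%u\n",
                           mem[SK_MEMINFO_RMEM_ALLOC], mem[SK_MEMINFO_RCVBUF],
                           mem[SK_MEMINFO_WMEM_ALLOC], mem[SK_MEMINFO_SNDBUF]);
    }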
3476 struct sock *sk = p; in sk_busy_loop_end() local
3478 return !skb_queue_empty(&sk->sk_receive_queue) || in sk_busy_loop_end()
3479 sk_busy_loop_timeout(sk, start_time); in sk_busy_loop_end()