/Linux-v6.1/net/ipv4/ |
D | tcp_fastopen.c |
    319  __NET_INC_STATS(sock_net(sk),  in tcp_fastopen_queue_check()
    336  return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||  in tcp_fastopen_no_cookie()
    351  int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);  in tcp_try_fastopen()
    357  NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);  in tcp_try_fastopen()
    376  NET_INC_STATS(sock_net(sk),  in tcp_try_fastopen()
    394  NET_INC_STATS(sock_net(sk),  in tcp_try_fastopen()
    399  NET_INC_STATS(sock_net(sk),  in tcp_try_fastopen()
    403  NET_INC_STATS(sock_net(sk),  in tcp_try_fastopen()
    491  struct net *net = sock_net(sk);  in tcp_fastopen_active_disable()
    493  if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout))  in tcp_fastopen_active_disable()
    [all …]
|
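The TCP fast open hits above all follow one pattern: sock_net(sk) resolves the struct net the socket belongs to, and the per-namespace sysctl is read through it with READ_ONCE() because the value can change at any time via /proc/sys. A minimal sketch of that pattern; example_fastopen_enabled() is a hypothetical helper, not a kernel function:

	#include <linux/compiler.h>	/* READ_ONCE() */
	#include <net/net_namespace.h>
	#include <net/sock.h>		/* sock_net() */

	/* Hypothetical helper: test a TFO mode bit in the socket's own netns. */
	static bool example_fastopen_enabled(const struct sock *sk, int flag)
	{
		const struct net *net = sock_net(sk);

		/* Lockless read: the sysctl may be rewritten concurrently. */
		return READ_ONCE(net->ipv4.sysctl_tcp_fastopen) & flag;
	}
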
D | tcp_timer.c |
    75   __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);  in tcp_write_err()
    126  __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);  in tcp_out_of_resources()
    130  if (!check_net(sock_net(sk))) {  in tcp_out_of_resources()
    146  int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */  in tcp_orphan_retries()
    162  const struct net *net = sock_net(sk);  in tcp_mtu_probing()
    235  struct net *net = sock_net(sk);  in tcp_write_timeout()
    283  __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);  in tcp_write_timeout()
    317  __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);  in tcp_delack_timer_handler()
    341  __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);  in tcp_delack_timer()
    378  max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);  in tcp_probe_timer()
    [all …]
|
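The LINUX_MIB_* hits in tcp_timer.c (and in most of the other TCP files listed here) account events against the namespace that owns the socket, which is why every increment goes through sock_net(sk). A sketch of the same pattern, assuming it runs with BHs already disabled as the real timer handlers do; example_note_timeout() is hypothetical:

	#include <net/ip.h>	/* NET_INC_STATS(), __NET_INC_STATS() */
	#include <net/sock.h>

	/* Hypothetical helper: count an abort-on-timeout event in the socket's
	 * netns so it shows up in that namespace's /proc/net/netstat.
	 */
	static void example_note_timeout(struct sock *sk)
	{
		/* __NET_INC_STATS() assumes BHs are disabled (timer/softirq
		 * path); use NET_INC_STATS() from process context instead.
		 */
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
	}
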
D | syncookies.c |
    343  if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) ||  in cookie_v4_check()
    352  __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);  in cookie_v4_check()
    356  __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);  in cookie_v4_check()
    360  tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);  in cookie_v4_check()
    363  tsoff = secure_tcp_ts_off(sock_net(sk),  in cookie_v4_check()
    369  if (!cookie_timestamp_decode(sock_net(sk), &tcp_opt))  in cookie_v4_check()
    406  RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(sock_net(sk), skb));  in cookie_v4_check()
    427  rt = ip_route_output_key(sock_net(sk), &fl4);  in cookie_v4_check()
    447  ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst);  in cookie_v4_check()
|
D | tcp_input.c |
    216   dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);  in tcp_gro_dev_warn()
    429   min(sndmem, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[2])));  in tcp_sndbuf_expand()
    464   int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1;  in __tcp_grow_window()
    537   int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);  in tcp_init_buffer_space()
    576   struct net *net = sock_net(sk);  in tcp_clamp_window()
    728   if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&  in tcp_rcv_space_adjust()
    749   READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));  in tcp_rcv_space_adjust()
    913   rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio);  in tcp_update_pacing_rate()
    915   rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio);  in tcp_update_pacing_rate()
    1054  READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));  in tcp_check_sack_reordering()
    [all …]
|
D | tcp_minisocks.c |
    250  struct net *net = sock_net(sk);  in tcp_time_wait()
    568  __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);  in tcp_create_openreq_child()
    598  tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);  in tcp_check_req()
    640  if (!tcp_oow_rate_limited(sock_net(sk), skb,  in tcp_check_req()
    729  !tcp_oow_rate_limited(sock_net(sk), skb,  in tcp_check_req()
    734  __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);  in tcp_check_req()
    753  __TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);  in tcp_check_req()
    776  __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);  in tcp_check_req()
    804  __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);  in tcp_check_req()
    806  if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {  in tcp_check_req()
    [all …]
|
D | inet_hashtables.c |
    51   return inet6_ehashfn(sock_net(sk),  in sk_ehashfn()
    55   return inet_ehashfn(sock_net(sk),  in sk_ehashfn()
    177  struct net *net = sock_net(sk);  in __inet_put_port()
    219  struct net *net = sock_net(sk);  in __inet_inherit_port()
    300  hash = ipv6_portaddr_hash(sock_net(sk),  in inet_lhash2_bucket_sk()
    305  hash = ipv4_portaddr_hash(sock_net(sk),  in inet_lhash2_bucket_sk()
    317  if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&  in compute_score()
    526  struct net *net = sock_net(sk);  in __inet_check_established()
    567  sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);  in __inet_check_established()
    601  struct net *net = sock_net(sk);  in inet_ehash_lookup_by_sk()
    [all …]
|
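In inet_hashtables.c (and in the similar compute_score() hits in udp.c below) the namespace is part of the lookup key: a port or address is only unique within one netns, so candidate sockets from other namespaces are rejected with net_eq(). A minimal sketch of that predicate; example_listener_match() is hypothetical:

	#include <net/net_namespace.h>	/* net_eq() */
	#include <net/sock.h>

	/* Hypothetical predicate: does @sk live in @net and own port @hnum? */
	static bool example_listener_match(const struct sock *sk,
					   const struct net *net,
					   unsigned short hnum)
	{
		return net_eq(sock_net(sk), net) && sk->sk_num == hnum;
	}
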
D | tcp_output.c |
    83   NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,  in tcp_event_new_data_sent()
    186  NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,  in tcp_event_ack_sent()
    230  if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))  in tcp_select_initial_window()
    241  space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));  in tcp_select_initial_window()
    274  NET_INC_STATS(sock_net(sk),  in tcp_select_window()
    285  READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))  in tcp_select_window()
    297  NET_INC_STATS(sock_net(sk),  in tcp_select_window()
    300  NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);  in tcp_select_window()
    324  bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 ||  in tcp_ecn_send_syn()
    346  if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback))  in tcp_ecn_clear_syn()
    [all …]
|
D | udp.c |
    143  if (net_eq(sock_net(sk2), net) &&  in udp_lib_lport_inuse()
    180  if (net_eq(sock_net(sk2), net) &&  in udp_lib_lport_inuse2()
    203  struct net *net = sock_net(sk);  in udp_reuseport_add_sock()
    208  if (net_eq(sock_net(sk2), net) &&  in udp_reuseport_add_sock()
    238  struct net *net = sock_net(sk);  in udp_lib_get_port()
    324  sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);  in udp_lib_get_port()
    350  ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);  in udp_v4_get_port()
    352  ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);  in udp_v4_get_port()
    368  if (!net_eq(sock_net(sk), net) ||  in compute_score()
    582  if (!net_eq(sock_net(sk), net) ||  in __udp_is_mcast_sock()
    [all …]
|
D | datagram.c |
    41   if (!oif || netif_index_is_l3_master(sock_net(sk), oif))  in __ip4_datagram_connect()
    55   IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);  in __ip4_datagram_connect()
    119  rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,  in ip4_datagram_release_cb()
|
D | raw_diag.c |
    99   net = sock_net(in_skb->sk);  in raw_diag_dump_one()
    143  struct net *net = sock_net(skb->sk);  in raw_diag_dump()
    167  if (!net_eq(sock_net(sk), net))  in raw_diag_dump()
    204  struct net *net = sock_net(in_skb->sk);  in raw_diag_destroy()
|
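raw_diag.c above, and the vsock, mptcp and netlink diag handlers further down, take the namespace from the netlink socket that carried the request (in_skb->sk / skb->sk), so an ss-style dump started inside a container only walks that container's sockets. A hypothetical skeleton of that filter; example_diag_want() is not a kernel function:

	#include <linux/skbuff.h>
	#include <net/net_namespace.h>
	#include <net/sock.h>

	/* Hypothetical filter: report @candidate only if it belongs to the
	 * namespace of the socket that sent the dump request.
	 */
	static bool example_diag_want(const struct sk_buff *cb_skb,
				      const struct sock *candidate)
	{
		struct net *net = sock_net(cb_skb->sk);	/* requester's netns */

		return net_eq(sock_net(candidate), net);
	}
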
/Linux-v6.1/net/ipv6/ |
D | syncookies.c |
    144  if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) ||  in cookie_v6_check()
    153  __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);  in cookie_v6_check()
    157  __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);  in cookie_v6_check()
    161  tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);  in cookie_v6_check()
    164  tsoff = secure_tcpv6_ts_off(sock_net(sk),  in cookie_v6_check()
    170  if (!cookie_timestamp_decode(sock_net(sk), &tcp_opt))  in cookie_v6_check()
    240  dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);  in cookie_v6_check()
    258  ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst);  in cookie_v6_check()
|
/Linux-v6.1/net/can/ |
D | raw.c |
    277  if (!net_eq(dev_net(dev), sock_net(sk)))  in raw_notify()
    393  dev = dev_get_by_index(sock_net(sk), ro->ifindex);  in raw_release()
    399  raw_disable_allfilters(sock_net(sk), NULL, sk);  in raw_release()
    442  dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);  in raw_bind()
    458  err = raw_enable_allfilters(sock_net(sk), dev, sk);  in raw_bind()
    464  err = raw_enable_allfilters(sock_net(sk), NULL, sk);  in raw_bind()
    473  dev = dev_get_by_index(sock_net(sk),  in raw_bind()
    481  raw_disable_allfilters(sock_net(sk), NULL, sk);  in raw_bind()
    556  dev = dev_get_by_index(sock_net(sk), ro->ifindex);  in raw_setsockopt()
    568  err = raw_enable_filters(sock_net(sk), dev, sk,  in raw_setsockopt()
    [all …]
|
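The CAN raw hits above (and the ieee802154, phonet and mctp ones below) resolve interface indexes strictly inside the socket's own namespace, so an ifindex from another netns can never be bound by accident. A sketch of that shape, with the usual reference-counting caveat; example_bind_ifindex() is hypothetical:

	#include <linux/errno.h>
	#include <linux/netdevice.h>	/* dev_get_by_index(), dev_put() */
	#include <net/sock.h>

	/* Hypothetical helper: look up @ifindex in the socket's netns only. */
	static int example_bind_ifindex(struct sock *sk, int ifindex)
	{
		struct net_device *dev;

		dev = dev_get_by_index(sock_net(sk), ifindex);
		if (!dev)
			return -ENODEV;

		/* ... check device type, install filters, remember ifindex ... */

		dev_put(dev);	/* dev_get_by_index() took a reference */
		return 0;
	}
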
/Linux-v6.1/net/vmw_vsock/ |
D | diag.c |
    59   net = sock_net(skb->sk);  in vsock_diag_dump()
    79   if (!net_eq(sock_net(sk), net))  in vsock_diag_dump()
    113  if (!net_eq(sock_net(sk), net))  in vsock_diag_dump()
    144  struct net *net = sock_net(skb->sk);  in vsock_diag_handler_dump()
|
/Linux-v6.1/net/ieee802154/ |
D | socket.c |
    136  dev_load(sock_net(sk), ifr.ifr_name);  in ieee802154_dev_ioctl()
    137  dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);  in ieee802154_dev_ioctl()
    178  sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);  in raw_hash()
    187  sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);  in raw_unhash()
    214  dev = ieee802154_get_dev(sock_net(sk), &addr);  in raw_bind()
    256  dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);  in raw_sendmsg()
    258  dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if);  in raw_sendmsg()
    462  sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);  in dgram_hash()
    471  sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);  in dgram_unhash()
    511  dev = ieee802154_get_dev(sock_net(sk), &haddr);  in dgram_bind()
    [all …]
|
/Linux-v6.1/drivers/infiniband/core/ |
D | nldev.c |
    1031  device = ib_device_get_by_index(sock_net(skb->sk), index);  in nldev_get_doit()
    1052  return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);  in nldev_get_doit()
    1075  device = ib_device_get_by_index(sock_net(skb->sk), index);  in nldev_set_doit()
    1169  device = ib_device_get_by_index(sock_net(skb->sk), index);  in nldev_port_get_doit()
    1189  err = fill_port_info(msg, device, port, sock_net(skb->sk));  in nldev_port_get_doit()
    1196  return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);  in nldev_port_get_doit()
    1223  device = ib_device_get_by_index(sock_net(skb->sk), ifindex);  in nldev_port_get_dumpit()
    1249  if (fill_port_info(skb, device, p, sock_net(skb->sk))) {  in nldev_port_get_dumpit()
    1278  device = ib_device_get_by_index(sock_net(skb->sk), index);  in nldev_res_get_doit()
    1298  return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);  in nldev_res_get_doit()
    [all …]
|
/Linux-v6.1/net/phonet/ |
D | socket.c |
    79   if (!net_eq(sock_net(sknode), net))  in pn_find_sock_by_sa()
    116  if (!net_eq(sock_net(sknode), net))  in pn_deliver_sock_broadcast()
    175  if (saddr && phonet_address_lookup(sock_net(sk), saddr))  in pn_socket_bind()
    372  dev = dev_get_by_index(sock_net(sk),  in pn_socket_ioctl()
    375  dev = phonet_device_get(sock_net(sk));  in pn_socket_ioctl()
    473  struct net *net = sock_net(sk);  in pn_sock_get_port()
    528  if (!net_eq(net, sock_net(sknode)))  in pn_sock_get_idx()
    545  while (sk && !net_eq(net, sock_net(sk)));  in pn_sock_get_next()
    635  if (!net_eq(sock_net(sk), &init_net))  in pn_sock_bind_res()
|
/Linux-v6.1/net/mptcp/ |
D | options.c |
    409   opts->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk));  in mptcp_syn_options()
    410   opts->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk));  in mptcp_syn_options()
    477   opts->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk));  in mptcp_established_options_mp()
    768   MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPRSTTX);  in mptcp_established_options_rst()
    792   MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSETX);  in mptcp_established_options_fastclose()
    814   MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);  in mptcp_established_options_mp_fail()
    1131  MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFASTCLOSERX);  in mptcp_incoming_options()
    1138  MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR);  in mptcp_incoming_options()
    1142  MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD);  in mptcp_incoming_options()
    1146  MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_PORTADD);  in mptcp_incoming_options()
    [all …]
|
D | mptcp_diag.c |
    38   net = sock_net(in_skb->sk);  in mptcp_diag_dump_one()
    83   struct net *net = sock_net(skb->sk);  in mptcp_diag_dump_listeners()
    111  if (!sk || !net_eq(sock_net(sk), net))  in mptcp_diag_dump_listeners()
    157  struct net *net = sock_net(skb->sk);  in mptcp_diag_dump()
|
/Linux-v6.1/net/netlink/ |
D | diag.c |
    94   struct net *net = sock_net(skb->sk);  in __netlink_diag_dump()
    133  if (!net_eq(sock_net(sk), net))  in __netlink_diag_dump()
    159  if (!net_eq(sock_net(sk), net))  in __netlink_diag_dump()
    227  struct net *net = sock_net(skb->sk);  in netlink_diag_handler_dump()
|
/Linux-v6.1/net/mctp/ |
D | af_mctp.c |
    130  addr->smctp_network = mctp_default_net(sock_net(sk));  in mctp_sendmsg()
    140  dev = dev_get_by_index_rcu(sock_net(sk), extaddr->smctp_ifindex);  in mctp_sendmsg()
    151  rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,  in mctp_sendmsg()
    355  struct net *net = sock_net(&msk->sk);  in mctp_ioctl_alloctag()
    397  struct net *net = sock_net(&msk->sk);  in mctp_ioctl_droptag()
    498  struct net *net = sock_net(&msk->sk);  in mctp_sk_expire_keys()
    555  struct net *net = sock_net(sk);  in mctp_sk_hash()
    567  struct net *net = sock_net(sk);  in mctp_sk_unhash()
|
/Linux-v6.1/net/smc/ |
D | smc_stats.h |
    111  struct net *_net = sock_net(&__smc->sk); \
    126  struct net *_net = sock_net(&__smc->sk); \
    154  struct net *_net = sock_net(&(_smc)->sk); \
    172  struct net *net = sock_net(&(_smc)->sk); \
    218  struct net *net = sock_net(&(__smc)->sk); \
|
/Linux-v6.1/include/net/ |
D | udp.h |
    406  ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
    407  sock_net(sk)->mib.udp_statistics) : \
    408  (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 : \
    409  sock_net(sk)->mib.udp_stats_in6); \
    414  IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
    415  sock_net(sk)->mib.udp_statistics; \
|
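The udp.h hits show why sock_net() turns up even inside header macros: every struct net carries its own MIB blocks under net->mib, and the macro merely selects the UDP vs UDP-Lite, IPv4 vs IPv6 block belonging to the socket's namespace. A hedged sketch of touching one of those blocks directly (the kernel normally goes through wrappers such as UDP_INC_STATS()); example_count_udp_rx() is hypothetical:

	#include <net/snmp.h>	/* SNMP_INC_STATS() */
	#include <net/sock.h>

	/* Hypothetical helper: bump the IPv4 UDP datagram counter of the
	 * namespace that owns @sk.
	 */
	static void example_count_udp_rx(struct sock *sk)
	{
		SNMP_INC_STATS(sock_net(sk)->mib.udp_statistics,
			       UDP_MIB_INDATAGRAMS);
	}
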
/Linux-v6.1/net/xfrm/ |
D | espintcp.c |
    18   XFRM_INC_STATS(sock_net(sk), LINUX_MIB_XFRMINERROR);  in handle_nonesp()
    40   skb->dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);  in handle_esp()
    67   XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);  in espintcp_rcv()
    80   XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);  in espintcp_rcv()
    88   XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINHDRERROR);  in espintcp_rcv()
    95   XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR);  in espintcp_rcv()
    101  XFRM_INC_STATS(sock_net(strp->sk), LINUX_MIB_XFRMINERROR);  in espintcp_rcv()
|
/Linux-v6.1/net/netfilter/ |
D | xt_cgroup.c |
    106  if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))  in cgroup_mt_v0()
    120  if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))  in cgroup_mt_v1()
    138  if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))  in cgroup_mt_v2()
|
/Linux-v6.1/net/tls/ |
D | tls_main.c |
    303  TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);  in tls_sk_proto_cleanup()
    306  TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);  in tls_sk_proto_cleanup()
    311  TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);  in tls_sk_proto_cleanup()
    314  TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);  in tls_sk_proto_cleanup()
    772  TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);  in do_tls_setsockopt_conf()
    773  TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);  in do_tls_setsockopt_conf()
    778  TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);  in do_tls_setsockopt_conf()
    779  TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);  in do_tls_setsockopt_conf()
    786  TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);  in do_tls_setsockopt_conf()
    787  TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);  in do_tls_setsockopt_conf()
    [all …]
|
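smc_stats.h, af_mctp.c and the MPTCP/TLS files above show one more variant: the protocol keeps its own socket structure with a struct sock embedded as the first member and passes &container->sk to sock_net(); the subsystem stat macros (MPTCP_INC_STATS(), XFRM_INC_STATS(), TLS_INC_STATS()) then wrap the same per-netns counter idea as NET_INC_STATS(). A minimal sketch of that container shape; struct example_sock and example_sk_net() are hypothetical:

	#include <net/net_namespace.h>
	#include <net/sock.h>

	/* Hypothetical per-protocol socket: the embedded struct sock comes
	 * first so code can convert between struct sock * and the container.
	 */
	struct example_sock {
		struct sock	sk;
		u32		example_flags;
	};

	/* Namespace of the embedded socket, as done via &msk->sk / &smc->sk. */
	static struct net *example_sk_net(const struct example_sock *esk)
	{
		return sock_net(&esk->sk);
	}
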