Searched refs: tcp_hdr (Results 1 – 25 of 66), sorted by relevance


/Linux-v4.19/drivers/net/ethernet/sfc/
tx_tso.c 164 EFX_WARN_ON_ONCE_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) + in efx_tso_check_protocol()
165 (tcp_hdr(skb)->doff << 2u)) > in efx_tso_check_protocol()
182 header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); in tso_start()
193 st->seqnum = ntohl(tcp_hdr(skb)->seq); in tso_start()
195 EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->urg); in tso_start()
196 EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->syn); in tso_start()
197 EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->rst); in tso_start()
310 tcp_flags = ((u8 *)tcp_hdr(skb))[TCP_FLAGS_OFFSET] & ~tcp_flags_mask; in tso_start_new_packet()
tx.c 391 (tcp_hdr(skb)->doff << 2u); in efx_tx_map_data()
/Linux-v4.19/net/ipv4/
tcp_offload.c 43 struct tcphdr *th = tcp_hdr(skb); in tcp4_gso_segment()
72 th = tcp_hdr(skb); in tcp_gso_segment()
118 th = tcp_hdr(skb); in tcp_gso_segment()
143 th = tcp_hdr(skb); in tcp_gso_segment()
227 th2 = tcp_hdr(p); in tcp_gro_receive()
293 struct tcphdr *th = tcp_hdr(skb); in tcp_gro_complete()
324 struct tcphdr *th = tcp_hdr(skb); in tcp4_gro_complete()
syncookies.c 185 const struct tcphdr *th = tcp_hdr(skb); in cookie_v4_init_sequence()
290 const struct tcphdr *th = tcp_hdr(skb); in cookie_v4_check()
tcp_ipv4.c 102 tcp_hdr(skb)->dest, in tcp_v4_init_seq()
103 tcp_hdr(skb)->source); in tcp_v4_init_seq()
620 struct tcphdr *th = tcp_hdr(skb); in __tcp_v4_send_check()
651 const struct tcphdr *th = tcp_hdr(skb); in tcp_v4_send_reset()
800 const struct tcphdr *th = tcp_hdr(skb); in tcp_v4_send_ack()
1231 const struct tcphdr *th = tcp_hdr(skb); in tcp_v4_md5_hash_skb()
1290 const struct tcphdr *th = tcp_hdr(skb); in tcp_v4_inbound_md5_hash()
1502 const struct tcphdr *th = tcp_hdr(skb); in tcp_v4_cookie_check()
1593 th = tcp_hdr(skb); in tcp_v4_early_demux()
tcp_input.c 189 !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) { in tcp_measure_rcv_mss()
250 if (tcp_hdr(skb)->cwr) { in tcp_ecn_accept_cwr()
3371 u32 nwin = ntohs(tcp_hdr(skb)->window); in tcp_ack_update_window()
3373 if (likely(!tcp_hdr(skb)->syn)) in tcp_ack_update_window()
3433 !tcp_hdr(skb)->syn) in tcp_oow_rate_limited()
3659 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { in tcp_ack()
3787 const struct tcphdr *th = tcp_hdr(skb); in tcp_parse_options()
4003 const struct tcphdr *th = tcp_hdr(skb); in tcp_disordered_ack()
4704 __skb_pull(skb, tcp_hdr(skb)->doff * 4); in tcp_data_queue()
5992 const struct tcphdr *th = tcp_hdr(skb); in tcp_rcv_state_process()
[all …]
tcp_minisocks.c 536 newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale; in tcp_create_openreq_child()
590 const struct tcphdr *th = tcp_hdr(skb); in tcp_check_req()
tcp_fastopen.c 249 tp->snd_wnd = ntohs(tcp_hdr(skb)->window); in tcp_fastopen_create_child()
ip_output.c 1573 tcp_hdr(skb)->source, tcp_hdr(skb)->dest, in ip_send_unicast_reply()
/Linux-v4.19/net/ipv6/
tcpv6_offload.c 35 struct tcphdr *th = tcp_hdr(skb); in tcp6_gro_complete()
57 struct tcphdr *th = tcp_hdr(skb); in tcp6_gso_segment()
syncookies.c 117 const struct tcphdr *th = tcp_hdr(skb); in cookie_v6_init_sequence()
140 const struct tcphdr *th = tcp_hdr(skb); in cookie_v6_check()
tcp_ipv6.c 110 tcp_hdr(skb)->dest, in tcp_v6_init_seq()
111 tcp_hdr(skb)->source); in tcp_v6_init_seq()
647 const struct tcphdr *th = tcp_hdr(skb); in tcp_v6_md5_hash_skb()
695 const struct tcphdr *th = tcp_hdr(skb); in tcp_v6_inbound_md5_hash()
797 const struct tcphdr *th = tcp_hdr(skb); in tcp_v6_send_response()
903 const struct tcphdr *th = tcp_hdr(skb); in tcp_v6_send_reset()
1025 const struct tcphdr *th = tcp_hdr(skb); in tcp_v6_cookie_check()
1653 th = tcp_hdr(skb); in tcp_v6_early_demux()
/Linux-v4.19/include/linux/
tcp.h 28 static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb) in tcp_hdr() function
40 return __tcp_hdrlen(tcp_hdr(skb)); in tcp_hdrlen()
55 return (tcp_hdr(skb)->doff - 5) * 4; in tcp_optlen()
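
For reference, the three inline helpers shown in this entry (tcp_hdr(), tcp_hdrlen(), tcp_optlen()) are the accessors every caller in this listing goes through. Below is a minimal, hypothetical sketch of the usual pattern, assuming the skb's transport header offset has already been set by the receive path; the helper name inspect_tcp_segment is made up for illustration and is not part of the kernel sources above.

/*
 * Illustrative sketch only, not taken from the listing: typical use of
 * the accessors defined in include/linux/tcp.h.
 */
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/printk.h>

static void inspect_tcp_segment(const struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);  /* transport header cast to struct tcphdr */

	pr_debug("seq=%u hdrlen=%u optlen=%u syn=%d\n",
		 ntohl(th->seq),   /* sequence number in host byte order */
		 tcp_hdrlen(skb),  /* doff * 4: full header length in bytes */
		 tcp_optlen(skb),  /* (doff - 5) * 4: option bytes only */
		 th->syn);         /* flag bits are bitfields in struct tcphdr */
}

The doff-based length computation is the same one visible in the sfc and sunvnet entries above (tcp_hdr(skb)->doff << 2u and tcp_hdr(skb)->doff * 4).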
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
tls_rxtx.c 162 th = tcp_hdr(nskb); in mlx5e_tls_complete_sync_skb()
195 u32 tcp_seq = ntohl(tcp_hdr(skb)->seq); in mlx5e_tls_handle_ooo()
284 skb_seq = ntohl(tcp_hdr(skb)->seq); in mlx5e_tls_handle_tx_skb()
/Linux-v4.19/include/net/
ip6_checksum.h 70 struct tcphdr *th = tcp_hdr(skb); in __tcp_v6_send_check()
/Linux-v4.19/net/core/
tso.c 70 tso->tcp_seq = ntohl(tcp_hdr(skb)->seq); in tso_start()
/Linux-v4.19/net/tls/
tls_device_fallback.c 168 struct tcphdr *th = tcp_hdr(skb); in update_chksum()
224 u32 tcp_seq = ntohl(tcp_hdr(skb)->seq); in fill_sg_in()
/Linux-v4.19/net/openvswitch/
flow.c 652 struct tcphdr *tcp = tcp_hdr(skb); in key_extract()
766 struct tcphdr *tcp = tcp_hdr(skb); in key_extract()
actions.c 426 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, in update_ip_l4_checksum()
458 inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, in update_ipv6_checksum()
764 th = tcp_hdr(skb); in set_tcp()
/Linux-v4.19/security/
lsm_audit.c 63 struct tcphdr *th = tcp_hdr(skb); in ipv4_skb_to_auditdata()
/Linux-v4.19/drivers/net/
thunderbolt.c 955 tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data); in tbnet_xmit_csum_and_map()
965 tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data); in tbnet_xmit_csum_and_map()
/Linux-v4.19/drivers/net/ethernet/sun/
sunvnet_common.c 321 struct tcphdr *ptcp = tcp_hdr(skb); in vnet_fullcsum_ipv4()
354 struct tcphdr *ptcp = tcp_hdr(skb); in vnet_fullcsum_ipv6()
1238 hlen += tcp_hdr(skb)->doff * 4; in vnet_handle_offloads()
/Linux-v4.19/drivers/net/ethernet/qlogic/qede/
qede_fp.c 878 th = tcp_hdr(skb); in qede_gro_ip_csum()
892 th = tcp_hdr(skb); in qede_gro_ipv6_csum()
/Linux-v4.19/drivers/net/ethernet/qualcomm/emac/
emac-mac.c 1285 tcp_hdr(skb)->check = in emac_tso_csum()
1300 tcp_hdr(skb)->check = in emac_tso_csum()
/Linux-v4.19/drivers/net/ethernet/socionext/
netsec.c 881 tcp_hdr(skb)->check = in netsec_netdev_start_xmit()
886 tcp_hdr(skb)->check = in netsec_netdev_start_xmit()