/*
 * Copyright (c) 2018-2019 Intel Corporation
 * Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "tp.h"

#define is(_a, _b) (strcmp((_a), (_b)) == 0)

#ifndef MIN3
#define MIN3(_a, _b, _c) MIN((_a), MIN((_b), (_c)))
#endif

#define th_sport(_x) UNALIGNED_GET(&(_x)->th_sport)
#define th_dport(_x) UNALIGNED_GET(&(_x)->th_dport)
#define th_seq(_x) ntohl(UNALIGNED_GET(&(_x)->th_seq))
#define th_ack(_x) ntohl(UNALIGNED_GET(&(_x)->th_ack))
#define th_off(_x) ((_x)->th_off)
#define th_flags(_x) UNALIGNED_GET(&(_x)->th_flags)
#define th_win(_x) UNALIGNED_GET(&(_x)->th_win)

#define tcp_slist(_conn, _slist, _op, _type, _link) \
({ \
	k_mutex_lock(&_conn->lock, K_FOREVER); \
\
	sys_snode_t *_node = sys_slist_##_op(_slist); \
\
	_type *_x = _node ? CONTAINER_OF(_node, _type, _link) : NULL; \
\
	k_mutex_unlock(&_conn->lock); \
\
	_x; \
})

#if defined(CONFIG_NET_TEST_PROTOCOL)
#define tcp_malloc(_size) \
	tp_malloc(_size, tp_basename(__FILE__), __LINE__, __func__)
#define tcp_calloc(_nmemb, _size) \
	tp_calloc(_nmemb, _size, tp_basename(__FILE__), __LINE__, __func__)
#define tcp_free(_ptr) tp_free(_ptr, tp_basename(__FILE__), __LINE__, __func__)
#else
#define tcp_malloc(_size) k_malloc(_size)
#define tcp_calloc(_nmemb, _size) k_calloc(_nmemb, _size)
#define tcp_free(_ptr) k_free(_ptr)
#endif

#define TCP_PKT_ALLOC_TIMEOUT K_MSEC(CONFIG_NET_TCP_PKT_ALLOC_TIMEOUT)

#if defined(CONFIG_NET_TEST_PROTOCOL)
#define tcp_pkt_clone(_pkt) tp_pkt_clone(_pkt, tp_basename(__FILE__), __LINE__)
#define tcp_pkt_unref(_pkt) tp_pkt_unref(_pkt, tp_basename(__FILE__), __LINE__)
#else
#define tcp_pkt_clone(_pkt) net_pkt_clone(_pkt, TCP_PKT_ALLOC_TIMEOUT)
#define tcp_pkt_unref(_pkt) net_pkt_unref(_pkt)
#define tp_pkt_alloc(args...) /* expands to nothing when the test protocol is disabled */
#endif

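/* Usage sketch for the tcp_slist() helper above (the list, node type and
 * link member below are hypothetical, not part of this header):
 *
 *	struct foo *f = tcp_slist(conn, &conn_owned_list, get,
 *				  struct foo, node);
 *
 * Here "get" expands to sys_slist_get(), so f is NULL when the list is
 * empty; the connection mutex is held for the duration of the list
 * operation.
 */
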
#define tcp_pkt_ref(_pkt) net_pkt_ref(_pkt)

#define tcp_pkt_alloc(_conn, _len) \
({ \
	struct net_pkt *_pkt; \
\
	if ((_len) > 0) { \
		_pkt = net_pkt_alloc_with_buffer( \
			(_conn)->iface, \
			(_len), \
			net_context_get_family((_conn)->context), \
			IPPROTO_TCP, \
			TCP_PKT_ALLOC_TIMEOUT); \
	} else { \
		_pkt = net_pkt_alloc(TCP_PKT_ALLOC_TIMEOUT); \
	} \
\
	tp_pkt_alloc(_pkt, tp_basename(__FILE__), __LINE__); \
\
	_pkt; \
})

#define tcp_rx_pkt_alloc(_conn, _len) \
({ \
	struct net_pkt *_pkt; \
\
	if ((_len) > 0) { \
		_pkt = net_pkt_rx_alloc_with_buffer( \
			(_conn)->iface, \
			(_len), \
			net_context_get_family((_conn)->context), \
			IPPROTO_TCP, \
			TCP_PKT_ALLOC_TIMEOUT); \
	} else { \
		_pkt = net_pkt_rx_alloc(TCP_PKT_ALLOC_TIMEOUT); \
	} \
\
	tp_pkt_alloc(_pkt, tp_basename(__FILE__), __LINE__); \
\
	_pkt; \
})

#define tcp_pkt_alloc_no_conn(_iface, _family, _len) \
({ \
	struct net_pkt *_pkt; \
\
	if ((_len) > 0) { \
		_pkt = net_pkt_alloc_with_buffer( \
			(_iface), (_len), (_family), \
			IPPROTO_TCP, \
			TCP_PKT_ALLOC_TIMEOUT); \
	} else { \
		_pkt = net_pkt_alloc(TCP_PKT_ALLOC_TIMEOUT); \
	} \
\
	tp_pkt_alloc(_pkt, tp_basename(__FILE__), __LINE__); \
\
	_pkt; \
})

#if defined(CONFIG_NET_TEST_PROTOCOL)
#define conn_seq(_conn, _req) \
	tp_seq_track(TP_SEQ, &(_conn)->seq, (_req), tp_basename(__FILE__), \
		     __LINE__, __func__)
#define conn_ack(_conn, _req) \
	tp_seq_track(TP_ACK, &(_conn)->ack, (_req), tp_basename(__FILE__), \
		     __LINE__, __func__)
#else
#define conn_seq(_conn, _req) (_conn)->seq += (_req)
#define conn_ack(_conn, _req) (_conn)->ack += (_req)
#endif

#define NET_TCP_DEFAULT_MSS 536

#define conn_mss(_conn) \
	MIN((_conn)->recv_options.mss_found ? (_conn)->recv_options.mss \
					    : NET_TCP_DEFAULT_MSS, \
	    net_tcp_get_supported_mss(_conn))

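/* Worked example for conn_mss() (the numbers are illustrative): if the peer
 * advertised an MSS option of 1460 (recv_options.mss_found is true) and
 * net_tcp_get_supported_mss() returns 1280 for this connection, conn_mss()
 * evaluates to MIN(1460, 1280) == 1280.  If no MSS option was received,
 * NET_TCP_DEFAULT_MSS (536) stands in for the peer value before the same
 * MIN() is applied.
 */
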
#define conn_state(_conn, _s) \
({ \
	NET_DBG("%s->%s", \
		tcp_state_to_str((_conn)->state, false), \
		tcp_state_to_str((_s), false)); \
	(_conn)->state = _s; \
})

#define conn_send_data_dump(_conn) \
({ \
	NET_DBG("conn: %p total=%zd, unacked_len=%d, " \
		"send_win=%hu, mss=%hu", \
		(_conn), net_pkt_get_len((_conn)->send_data), \
		_conn->unacked_len, _conn->send_win, \
		(uint16_t)conn_mss((_conn))); \
	NET_DBG("conn: %p send_data_timer=%hu, send_data_retries=%hu", \
		(_conn), \
		(bool)k_ticks_to_ms_ceil32( \
			k_work_delayable_remaining_get( \
				&(_conn)->send_data_timer)), \
		(_conn)->send_data_retries); \
})

enum pkt_addr {
	TCP_EP_SRC = 1,
	TCP_EP_DST = 0
};

struct tcphdr {
	uint16_t th_sport;
	uint16_t th_dport;
	uint32_t th_seq;
	uint32_t th_ack;
#ifdef CONFIG_LITTLE_ENDIAN
	uint8_t th_x2:4;	/* unused */
	uint8_t th_off:4;	/* data offset, in units of 32-bit words
				 * (5 for a header without options)
				 */
#else
	uint8_t th_off:4;
	uint8_t th_x2:4;
#endif
	uint8_t th_flags;
	uint16_t th_win;
	uint16_t th_sum;
	uint16_t th_urp;
} __packed;

enum th_flags {
	FIN = BIT(0),
	SYN = BIT(1),
	RST = BIT(2),
	PSH = BIT(3),
	ACK = BIT(4),
	URG = BIT(5),
	ECN = BIT(6),
	CWR = BIT(7),
};

struct tcp_mss_option {
	uint32_t option;
};

enum tcp_state {
	TCP_UNUSED = 0,
	TCP_LISTEN,
	TCP_SYN_SENT,
	TCP_SYN_RECEIVED,
	TCP_ESTABLISHED,
	TCP_FIN_WAIT_1,
	TCP_FIN_WAIT_2,
	TCP_CLOSE_WAIT,
	TCP_CLOSING,
	TCP_LAST_ACK,
	TCP_TIME_WAIT,
	TCP_CLOSED
};

enum tcp_data_mode {
	TCP_DATA_MODE_SEND = 0,
	TCP_DATA_MODE_RESEND = 1
};

union tcp_endpoint {
	struct sockaddr sa;
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
};

/* TCP Option codes */
#define NET_TCP_END_OPT          0
#define NET_TCP_NOP_OPT          1
#define NET_TCP_MSS_OPT          2
#define NET_TCP_WINDOW_SCALE_OPT 3

/* TCP Option sizes */
#define NET_TCP_END_SIZE          1
#define NET_TCP_NOP_SIZE          1
#define NET_TCP_MSS_SIZE          4
#define NET_TCP_WINDOW_SCALE_SIZE 3

struct tcp_options {
	uint16_t mss;
	uint16_t window;
	bool mss_found : 1;
	bool wnd_found : 1;
};

#ifdef CONFIG_NET_TCP_CONGESTION_AVOIDANCE
/* Reno congestion avoidance state */
struct tcp_collision_avoidance_reno {
	uint16_t cwnd;
	uint16_t ssthresh;
	uint16_t pending_fast_retransmit_bytes;
};
#endif

struct tcp; /* Forward declaration */
typedef void (*net_tcp_closed_cb_t)(struct tcp *conn, void *user_data);

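/* Example of how the option definitions above map onto the wire (this is the
 * standard TCP MSS option layout from RFC 9293): an MSS option occupies
 * NET_TCP_MSS_SIZE (4) bytes, kind NET_TCP_MSS_OPT (2), length 4, followed
 * by a 16-bit MSS in network byte order.  When such an option is parsed, the
 * decoded value is expected to land in tcp_options.mss with mss_found set.
 */
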
struct tcp { /* TCP connection */
	sys_snode_t next;
	struct net_context *context;
	struct net_pkt *send_data;
	struct net_pkt *queue_recv_data;
	struct net_if *iface;
	void *recv_user_data;
	sys_slist_t send_queue;
	union {
		net_tcp_accept_cb_t accept_cb;
		struct tcp *accepted_conn;
	};
	net_context_connect_cb_t connect_cb;
#if defined(CONFIG_NET_TEST)
	net_tcp_closed_cb_t test_closed_cb;
	void *test_user_data;
#endif
	struct k_mutex lock;
	struct k_sem connect_sem; /* semaphore for blocking connect */
	struct k_sem tx_sem; /* semaphore indicating whether transfers are blocked */
	struct k_fifo recv_data; /* temp queue before passing data to app */
	struct tcp_options recv_options;
	struct tcp_options send_options;
	struct k_work_delayable send_timer;
	struct k_work_delayable recv_queue_timer;
	struct k_work_delayable send_data_timer;
	struct k_work_delayable timewait_timer;
	struct k_work_delayable persist_timer;
	struct k_work_delayable ack_timer;
#if defined(CONFIG_NET_TCP_KEEPALIVE)
	struct k_work_delayable keepalive_timer;
#endif /* CONFIG_NET_TCP_KEEPALIVE */
	struct k_work conn_release;

	union {
		/* Because the FIN and establish timers never run at the
		 * same time, they share one timer slot to save memory.
		 */
		struct k_work_delayable fin_timer;
		struct k_work_delayable establish_timer;
	};
	union tcp_endpoint src;
	union tcp_endpoint dst;
#if defined(CONFIG_NET_TCP_IPV6_ND_REACHABILITY_HINT)
	int64_t last_nd_hint_time;
#endif
	size_t send_data_total;
	size_t send_retries;
	int unacked_len;
	atomic_t ref_count;
	enum tcp_state state;
	enum tcp_data_mode data_mode;
	uint32_t seq;
	uint32_t ack;
#if defined(CONFIG_NET_TCP_KEEPALIVE)
	uint32_t keep_idle;
	uint32_t keep_intvl;
	uint32_t keep_cnt;
	uint32_t keep_cur;
#endif /* CONFIG_NET_TCP_KEEPALIVE */
	uint16_t recv_win_max;
	uint16_t recv_win;
	uint16_t send_win_max;
	uint16_t send_win;
#ifdef CONFIG_NET_TCP_RANDOMIZED_RTO
	uint16_t rto;
#endif
#ifdef CONFIG_NET_TCP_CONGESTION_AVOIDANCE
	struct tcp_collision_avoidance_reno ca;
#endif
	uint8_t send_data_retries;
#ifdef CONFIG_NET_TCP_FAST_RETRANSMIT
	uint8_t dup_ack_cnt;
#endif
	uint8_t zwp_retries;
	bool in_retransmission : 1;
	bool in_connect : 1;
	bool in_close : 1;
#if defined(CONFIG_NET_TCP_KEEPALIVE)
	bool keep_alive : 1;
#endif /* CONFIG_NET_TCP_KEEPALIVE */
	bool tcp_nodelay : 1;
};

#define _flags(_fl, _op, _mask, _cond) \
({ \
	bool result = false; \
\
	if (UNALIGNED_GET(_fl) && (_cond) && \
	    (UNALIGNED_GET(_fl) _op (_mask))) { \
		UNALIGNED_PUT(UNALIGNED_GET(_fl) & ~(_mask), _fl); \
		result = true; \
	} \
\
	result; \
})

#define FL(_fl, _op, _mask, _args...) \
	_flags(_fl, _op, _mask, sizeof(#_args) > 1 ? _args : true)

typedef void (*net_tcp_cb_t)(struct tcp *conn, void *user_data);

#if defined(CONFIG_NET_TEST)
void tcp_install_close_cb(struct net_context *ctx,
			  net_tcp_closed_cb_t cb,
			  void *user_data);
#endif
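
/* Illustrative sketch of the FL() macro above: it tests a local copy of the
 * flags byte against a mask with the given operator, optionally gated by an
 * extra condition, and clears the masked bits on a match, e.g.
 *
 *	uint8_t fl = th_flags(th);
 *
 *	if (FL(&fl, ==, SYN | ACK)) {
 *		...	the flags were exactly SYN+ACK; both are now cleared in fl
 *	}
 *	if (FL(&fl, &, PSH, len > 0)) {
 *		...	PSH was set and the extra condition held; PSH is cleared in fl
 *	}
 *
 * "th" and "len" above are hypothetical locals, not part of this header.
 */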