1 // SPDX-License-Identifier: GPL-2.0-only
2 /* (C) 1999-2001 Paul `Rusty' Russell
3  * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
4  * (C) 2002-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
5  * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
6  */
7 
8 #include <linux/types.h>
9 #include <linux/timer.h>
10 #include <linux/module.h>
11 #include <linux/in.h>
12 #include <linux/tcp.h>
13 #include <linux/spinlock.h>
14 #include <linux/skbuff.h>
15 #include <linux/ipv6.h>
16 #include <net/ip6_checksum.h>
17 #include <asm/unaligned.h>
18 
19 #include <net/tcp.h>
20 
21 #include <linux/netfilter.h>
22 #include <linux/netfilter_ipv4.h>
23 #include <linux/netfilter_ipv6.h>
24 #include <net/netfilter/nf_conntrack.h>
25 #include <net/netfilter/nf_conntrack_l4proto.h>
26 #include <net/netfilter/nf_conntrack_ecache.h>
27 #include <net/netfilter/nf_conntrack_seqadj.h>
28 #include <net/netfilter/nf_conntrack_synproxy.h>
29 #include <net/netfilter/nf_conntrack_timeout.h>
30 #include <net/netfilter/nf_log.h>
31 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
32 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
33 
34   /* FIXME: Examine ipfilter's timeouts and conntrack transitions more
35      closely.  They're more complex. --RR */
36 
37 static const char *const tcp_conntrack_names[] = {
38 	"NONE",
39 	"SYN_SENT",
40 	"SYN_RECV",
41 	"ESTABLISHED",
42 	"FIN_WAIT",
43 	"CLOSE_WAIT",
44 	"LAST_ACK",
45 	"TIME_WAIT",
46 	"CLOSE",
47 	"SYN_SENT2",
48 };
49 
50 enum nf_ct_tcp_action {
51 	NFCT_TCP_IGNORE,
52 	NFCT_TCP_INVALID,
53 	NFCT_TCP_ACCEPT,
54 };
55 
56 #define SECS * HZ
57 #define MINS * 60 SECS
58 #define HOURS * 60 MINS
59 #define DAYS * 24 HOURS
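/*
 * For illustration: the helpers above are plain object-like macros, so a
 * table entry below such as "2 MINS" expands to "2 * 60 * HZ", i.e. a
 * value in jiffies.  Assuming HZ == 1000 (a kernel config choice, not
 * implied by this file), 2 MINS == 120000 jiffies.
 */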
60 
61 static const unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] = {
62 	[TCP_CONNTRACK_SYN_SENT]	= 2 MINS,
63 	[TCP_CONNTRACK_SYN_RECV]	= 60 SECS,
64 	[TCP_CONNTRACK_ESTABLISHED]	= 5 DAYS,
65 	[TCP_CONNTRACK_FIN_WAIT]	= 2 MINS,
66 	[TCP_CONNTRACK_CLOSE_WAIT]	= 60 SECS,
67 	[TCP_CONNTRACK_LAST_ACK]	= 30 SECS,
68 	[TCP_CONNTRACK_TIME_WAIT]	= 2 MINS,
69 	[TCP_CONNTRACK_CLOSE]		= 10 SECS,
70 	[TCP_CONNTRACK_SYN_SENT2]	= 2 MINS,
71 /* RFC1122 says the R2 limit should be at least 100 seconds.
72    Linux uses 15 packets as limit, which corresponds
73    to ~13-30min depending on RTO. */
74 	[TCP_CONNTRACK_RETRANS]		= 5 MINS,
75 	[TCP_CONNTRACK_UNACK]		= 5 MINS,
76 };
77 
78 #define sNO TCP_CONNTRACK_NONE
79 #define sSS TCP_CONNTRACK_SYN_SENT
80 #define sSR TCP_CONNTRACK_SYN_RECV
81 #define sES TCP_CONNTRACK_ESTABLISHED
82 #define sFW TCP_CONNTRACK_FIN_WAIT
83 #define sCW TCP_CONNTRACK_CLOSE_WAIT
84 #define sLA TCP_CONNTRACK_LAST_ACK
85 #define sTW TCP_CONNTRACK_TIME_WAIT
86 #define sCL TCP_CONNTRACK_CLOSE
87 #define sS2 TCP_CONNTRACK_SYN_SENT2
88 #define sIV TCP_CONNTRACK_MAX
89 #define sIG TCP_CONNTRACK_IGNORE
90 
91 /* Which of the RST/SYN/FIN/ACK flags is set; used to index the state table. */
92 enum tcp_bit_set {
93 	TCP_SYN_SET,
94 	TCP_SYNACK_SET,
95 	TCP_FIN_SET,
96 	TCP_ACK_SET,
97 	TCP_RST_SET,
98 	TCP_NONE_SET,
99 };
100 
101 /*
102  * The TCP state transition table needs a few words...
103  *
104  * We are the man in the middle. All the packets go through us
105  * but might get lost in transit to the destination.
106  * It is assumed that the destinations can't receive segments
107  * we haven't seen.
108  *
109  * The checked segment is in window, but our windows are *not*
110  * equivalent to those of the sender/receiver. We always
111  * try to guess the state of the current sender.
112  *
113  * The meaning of the states are:
114  *
115  * NONE:	initial state
116  * SYN_SENT:	SYN-only packet seen
117  * SYN_SENT2:	SYN-only packet seen from reply dir, simultaneous open
118  * SYN_RECV:	SYN-ACK packet seen
119  * ESTABLISHED:	ACK packet seen
120  * FIN_WAIT:	FIN packet seen
121  * CLOSE_WAIT:	ACK seen (after FIN)
122  * LAST_ACK:	FIN seen (after FIN)
123  * TIME_WAIT:	last ACK seen
124  * CLOSE:	closed connection (RST)
125  *
126  * Packets marked as IGNORED (sIG):
127  *	packets that may be either valid or invalid,
128  *	where the receiver may send back a connection-closing
129  *	RST or a SYN/ACK.
130  *
131  * Packets marked as INVALID (sIV):
132  *	packets that we regard as truly invalid.
133  */
134 static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
135 	{
136 /* ORIGINAL */
137 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
138 /*syn*/	   { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
139 /*
140  *	sNO -> sSS	Initialize a new connection
141  *	sSS -> sSS	Retransmitted SYN
142  *	sS2 -> sS2	Late retransmitted SYN
143  *	sSR -> sIG
144  *	sES -> sIG	Error: SYNs in window outside the SYN_SENT state
145  *			are errors. Receiver will reply with RST
146  *			and close the connection.
147  *			Or we are not in sync and hold a dead connection.
148  *	sFW -> sIG
149  *	sCW -> sIG
150  *	sLA -> sIG
151  *	sTW -> sSS	Reopened connection (RFC 1122).
152  *	sCL -> sSS
153  */
154 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
155 /*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
156 /*
157  *	sNO -> sIV	Too late and no reason to do anything
158  *	sSS -> sIV	Client can't send SYN and then SYN/ACK
159  *	sS2 -> sSR	SYN/ACK sent to SYN2 in simultaneous open
160  *	sSR -> sSR	Late retransmitted SYN/ACK in simultaneous open
161  *	sES -> sIV	Invalid SYN/ACK packets sent by the client
162  *	sFW -> sIV
163  *	sCW -> sIV
164  *	sLA -> sIV
165  *	sTW -> sIV
166  *	sCL -> sIV
167  */
168 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
169 /*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
170 /*
171  *	sNO -> sIV	Too late and no reason to do anything...
172  *	sSS -> sIV	Client might not send FIN in this state:
173  *			we enforce waiting for a SYN/ACK reply first.
174  *	sS2 -> sIV
175  *	sSR -> sFW	Close started.
176  *	sES -> sFW
177  *	sFW -> sLA	FIN seen in both directions, waiting for
178  *			the last ACK.
179  *			Might be a retransmitted FIN as well...
180  *	sCW -> sLA
181  *	sLA -> sLA	Retransmitted FIN. Remain in the same state.
182  *	sTW -> sTW
183  *	sCL -> sCL
184  */
185 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
186 /*ack*/	   { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
187 /*
188  *	sNO -> sES	Assumed.
189  *	sSS -> sIV	ACK is invalid: we haven't seen a SYN/ACK yet.
190  *	sS2 -> sIV
191  *	sSR -> sES	Established state is reached.
192  *	sES -> sES	:-)
193  *	sFW -> sCW	Normal close request answered by ACK.
194  *	sCW -> sCW
195  *	sLA -> sTW	Last ACK detected (RFC5961 challenged)
196  *	sTW -> sTW	Retransmitted last ACK. Remain in the same state.
197  *	sCL -> sCL
198  */
199 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
200 /*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
201 /*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
202 	},
203 	{
204 /* REPLY */
205 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
206 /*syn*/	   { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 },
207 /*
208  *	sNO -> sIV	Never reached.
209  *	sSS -> sS2	Simultaneous open
210  *	sS2 -> sS2	Retransmitted simultaneous SYN
211  *	sSR -> sIV	Invalid SYN packets sent by the server
212  *	sES -> sIV
213  *	sFW -> sIV
214  *	sCW -> sIV
215  *	sLA -> sIV
216  *	sTW -> sSS	Reopened connection, but server may have switched role
217  *	sCL -> sIV
218  */
219 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
220 /*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
221 /*
222  *	sSS -> sSR	Standard open.
223  *	sS2 -> sSR	Simultaneous open
224  *	sSR -> sIG	Retransmitted SYN/ACK, ignore it.
225  *	sES -> sIG	Late retransmitted SYN/ACK?
226  *	sFW -> sIG	Might be SYN/ACK answering ignored SYN
227  *	sCW -> sIG
228  *	sLA -> sIG
229  *	sTW -> sIG
230  *	sCL -> sIG
231  */
232 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
233 /*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
234 /*
235  *	sSS -> sIV	Server might not send FIN in this state.
236  *	sS2 -> sIV
237  *	sSR -> sFW	Close started.
238  *	sES -> sFW
239  *	sFW -> sLA	FIN seen in both directions.
240  *	sCW -> sLA
241  *	sLA -> sLA	Retransmitted FIN.
242  *	sTW -> sTW
243  *	sCL -> sCL
244  */
245 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
246 /*ack*/	   { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
247 /*
248  *	sSS -> sIG	Might be a half-open connection.
249  *	sS2 -> sIG
250  *	sSR -> sSR	Might answer late resent SYN.
251  *	sES -> sES	:-)
252  *	sFW -> sCW	Normal close request answered by ACK.
253  *	sCW -> sCW
254  *	sLA -> sTW	Last ACK detected (RFC5961 challenged)
255  *	sTW -> sTW	Retransmitted last ACK.
256  *	sCL -> sCL
257  */
258 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
259 /*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
260 /*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
261 	}
262 };
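/*
 * Usage sketch (illustrative only, excluded from the build): the packet
 * handler below indexes this table as tcp_conntracks[dir][index][state].
 * E.g. a SYN in the original direction on a fresh entry moves NONE to
 * SYN_SENT:
 */
#if 0
	u8 next = tcp_conntracks[IP_CT_DIR_ORIGINAL]	/* dir   == 0 */
				[TCP_SYN_SET]		/* index == 0 */
				[TCP_CONNTRACK_NONE];	/* old state == sNO */
	/* next == TCP_CONNTRACK_SYN_SENT (sSS), per the first row above */
#endif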
263 
264 #ifdef CONFIG_NF_CONNTRACK_PROCFS
265 /* Print out the private part of the conntrack. */
266 static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
267 {
268 	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
269 		return;
270 
271 	seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
272 }
273 #endif
274 
275 static unsigned int get_conntrack_index(const struct tcphdr *tcph)
276 {
277 	if (tcph->rst) return TCP_RST_SET;
278 	else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
279 	else if (tcph->fin) return TCP_FIN_SET;
280 	else if (tcph->ack) return TCP_ACK_SET;
281 	else return TCP_NONE_SET;
282 }
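/*
 * Note the precedence above: RST wins over ACK and SYN wins over ACK, so
 * a SYN/ACK yields TCP_SYNACK_SET, a RST/ACK yields TCP_RST_SET, and a
 * plain data segment (only ACK set) yields TCP_ACK_SET.
 */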
283 
284 /* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
285    in IP Filter' by Guido van Rooij.
286 
287    http://www.sane.nl/events/sane2000/papers.html
288    http://www.darkart.com/mirrors/www.obfuscation.org/ipf/
289 
290    The boundaries and the conditions are changed according to RFC793:
291    the packet must intersect the window (i.e. segments may be
292    after the right or before the left edge) and thus receivers may ACK
293    segments after the right edge of the window.
294 
295 	td_maxend = max(sack + max(win,1)) seen in reply packets
296 	td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
297 	td_maxwin += seq + len - sender.td_maxend
298 			if seq + len > sender.td_maxend
299 	td_end    = max(seq + len) seen in sent packets
300 
301    I.   Upper bound for valid data:	seq <= sender.td_maxend
302    II.  Lower bound for valid data:	seq + len >= sender.td_end - receiver.td_maxwin
303    III.	Upper bound for valid (s)ack:   sack <= receiver.td_end
304    IV.	Lower bound for valid (s)ack:	sack >= receiver.td_end - MAXACKWINDOW
305 
306    where sack is the highest right edge of a SACK block found in the packet,
307    or ack in the case of a packet without the SACK option.
308 
309    The upper bound limit for a valid (s)ack is not ignored -
310    we don't have to deal with fragments.
311 */
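/* Worked example with hypothetical numbers: let sender.td_end = 1000,
   sender.td_maxend = 1500, receiver.td_end = 2000 and
   receiver.td_maxwin = 300.  A segment with seq = 1200 and len = 100
   passes I (1200 <= 1500) and II (1300 >= 1000 - 300); its ack = 1900
   passes III (1900 <= 2000) and IV, since MAXACKWINDOW is at least
   66000 (see below). */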
312 
313 static inline __u32 segment_seq_plus_len(__u32 seq,
314 					 size_t len,
315 					 unsigned int dataoff,
316 					 const struct tcphdr *tcph)
317 {
318 	/* XXX Should I use payload length field in IP/IPv6 header ?
319 	 * - YK */
320 	return (seq + len - dataoff - tcph->doff*4
321 		+ (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
322 }
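/* For illustration: skb->len - dataoff - tcph->doff*4 is the TCP payload
 * length, so a pure SYN (no payload) yields seq + 1 and a segment with
 * 100 payload bytes yields seq + 100 -- the sequence number just past
 * this segment.
 */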
323 
324 /* Fixme: what about big packets? */
325 #define MAXACKWINCONST			66000
326 #define MAXACKWINDOW(sender)						\
327 	((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin	\
328 					      : MAXACKWINCONST)
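/* Example: a sender with td_maxwin = 5840 gets the 66000 floor, while
 * td_maxwin = 70000 is used as-is.
 */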
329 
330 /*
331  * Simplified tcp_parse_options routine from tcp_input.c
332  */
333 static void tcp_options(const struct sk_buff *skb,
334 			unsigned int dataoff,
335 			const struct tcphdr *tcph,
336 			struct ip_ct_tcp_state *state)
337 {
338 	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
339 	const unsigned char *ptr;
340 	int length = (tcph->doff*4) - sizeof(struct tcphdr);
341 
342 	if (!length)
343 		return;
344 
345 	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
346 				 length, buff);
347 	if (!ptr)
348 		return;
349 
350 	state->td_scale = 0;
351 	state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;
352 
353 	while (length > 0) {
354 		int opcode = *ptr++;
355 		int opsize;
356 
357 		switch (opcode) {
358 		case TCPOPT_EOL:
359 			return;
360 		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
361 			length--;
362 			continue;
363 		default:
364 			if (length < 2)
365 				return;
366 			opsize = *ptr++;
367 			if (opsize < 2) /* "silly options" */
368 				return;
369 			if (opsize > length)
370 				return;	/* don't parse partial options */
371 
372 			if (opcode == TCPOPT_SACK_PERM
373 			    && opsize == TCPOLEN_SACK_PERM)
374 				state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
375 			else if (opcode == TCPOPT_WINDOW
376 				 && opsize == TCPOLEN_WINDOW) {
377 				state->td_scale = *(u_int8_t *)ptr;
378 
379 				if (state->td_scale > TCP_MAX_WSCALE)
380 					state->td_scale = TCP_MAX_WSCALE;
381 
382 				state->flags |=
383 					IP_CT_TCP_FLAG_WINDOW_SCALE;
384 			}
385 			ptr += opsize - 2;
386 			length -= opsize;
387 		}
388 	}
389 }
390 
391 static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
392                      const struct tcphdr *tcph, __u32 *sack)
393 {
394 	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
395 	const unsigned char *ptr;
396 	int length = (tcph->doff*4) - sizeof(struct tcphdr);
397 	__u32 tmp;
398 
399 	if (!length)
400 		return;
401 
402 	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
403 				 length, buff);
404 	if (!ptr)
405 		return;
406 
407 	/* Fast path for timestamp-only option */
408 	if (length == TCPOLEN_TSTAMP_ALIGNED
409 	    && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
410 				       | (TCPOPT_NOP << 16)
411 				       | (TCPOPT_TIMESTAMP << 8)
412 				       | TCPOLEN_TIMESTAMP))
413 		return;
414 
415 	while (length > 0) {
416 		int opcode = *ptr++;
417 		int opsize, i;
418 
419 		switch (opcode) {
420 		case TCPOPT_EOL:
421 			return;
422 		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
423 			length--;
424 			continue;
425 		default:
426 			if (length < 2)
427 				return;
428 			opsize = *ptr++;
429 			if (opsize < 2) /* "silly options" */
430 				return;
431 			if (opsize > length)
432 				return;	/* don't parse partial options */
433 
434 			if (opcode == TCPOPT_SACK
435 			    && opsize >= (TCPOLEN_SACK_BASE
436 					  + TCPOLEN_SACK_PERBLOCK)
437 			    && !((opsize - TCPOLEN_SACK_BASE)
438 				 % TCPOLEN_SACK_PERBLOCK)) {
439 				for (i = 0;
440 				     i < (opsize - TCPOLEN_SACK_BASE);
441 				     i += TCPOLEN_SACK_PERBLOCK) {
442 					tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);
443 
444 					if (after(tmp, *sack))
445 						*sack = tmp;
446 				}
447 				return;
448 			}
449 			ptr += opsize - 2;
450 			length -= opsize;
451 		}
452 	}
453 }
454 
455 static void tcp_init_sender(struct ip_ct_tcp_state *sender,
456 			    struct ip_ct_tcp_state *receiver,
457 			    const struct sk_buff *skb,
458 			    unsigned int dataoff,
459 			    const struct tcphdr *tcph,
460 			    u32 end, u32 win)
461 {
462 	/* SYN-ACK in reply to a SYN
463 	 * or SYN from reply direction in simultaneous open.
464 	 */
465 	sender->td_end =
466 	sender->td_maxend = end;
467 	sender->td_maxwin = (win == 0 ? 1 : win);
468 
469 	tcp_options(skb, dataoff, tcph, sender);
470 	/* RFC 1323:
471 	 * Both sides must send the Window Scale option
472 	 * to enable window scaling in either direction.
473 	 */
474 	if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
475 	      receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
476 		sender->td_scale = 0;
477 		receiver->td_scale = 0;
478 	}
479 }
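/* Example of the RFC 1323 rule above: if the original SYN carried
 * wscale 7 but this SYN-ACK has no window scale option, both td_scale
 * values end up 0 and window tracking proceeds unscaled in both
 * directions.
 */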
480 
481 __printf(6, 7)
482 static enum nf_ct_tcp_action nf_tcp_log_invalid(const struct sk_buff *skb,
483 						const struct nf_conn *ct,
484 						const struct nf_hook_state *state,
485 						const struct ip_ct_tcp_state *sender,
486 						enum nf_ct_tcp_action ret,
487 						const char *fmt, ...)
488 {
489 	const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct));
490 	struct va_format vaf;
491 	va_list args;
492 	bool be_liberal;
493 
494 	be_liberal = sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || tn->tcp_be_liberal;
495 	if (be_liberal)
496 		return NFCT_TCP_ACCEPT;
497 
498 	va_start(args, fmt);
499 	vaf.fmt = fmt;
500 	vaf.va = &args;
501 	nf_ct_l4proto_log_invalid(skb, ct, state, "%pV", &vaf);
502 	va_end(args);
503 
504 	return ret;
505 }
506 
507 static enum nf_ct_tcp_action
508 tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
509 	      unsigned int index, const struct sk_buff *skb,
510 	      unsigned int dataoff, const struct tcphdr *tcph,
511 	      const struct nf_hook_state *hook_state)
512 {
513 	struct ip_ct_tcp *state = &ct->proto.tcp;
514 	struct ip_ct_tcp_state *sender = &state->seen[dir];
515 	struct ip_ct_tcp_state *receiver = &state->seen[!dir];
516 	__u32 seq, ack, sack, end, win, swin;
517 	bool in_recv_win, seq_ok;
518 	s32 receiver_offset;
519 	u16 win_raw;
520 
521 	/*
522 	 * Get the required data from the packet.
523 	 */
524 	seq = ntohl(tcph->seq);
525 	ack = sack = ntohl(tcph->ack_seq);
526 	win_raw = ntohs(tcph->window);
527 	win = win_raw;
528 	end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
529 
530 	if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
531 		tcp_sack(skb, dataoff, tcph, &sack);
532 
533 	/* Take into account NAT sequence number mangling */
534 	receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);
535 	ack -= receiver_offset;
536 	sack -= receiver_offset;
537 
538 	if (sender->td_maxwin == 0) {
539 		/*
540 		 * Initialize sender data.
541 		 */
542 		if (tcph->syn) {
543 			tcp_init_sender(sender, receiver,
544 					skb, dataoff, tcph,
545 					end, win);
546 			if (!tcph->ack)
547 				/* Simultaneous open */
548 				return NFCT_TCP_ACCEPT;
549 		} else {
550 			/*
551 			 * We are in the middle of a connection,
552 			 * its history is lost for us.
553 			 * Let's try to use the data from the packet.
554 			 */
555 			sender->td_end = end;
556 			swin = win << sender->td_scale;
557 			sender->td_maxwin = (swin == 0 ? 1 : swin);
558 			sender->td_maxend = end + sender->td_maxwin;
559 			if (receiver->td_maxwin == 0) {
560 				/* We haven't seen traffic in the other
561 				 * direction yet but we have to tweak window
562 				 * tracking to pass III and IV until that
563 				 * happens.
564 				 */
565 				receiver->td_end = receiver->td_maxend = sack;
566 			} else if (sack == receiver->td_end + 1) {
567 				/* Likely a reply to a keepalive.
568 				 * Needed for III.
569 				 */
570 				receiver->td_end++;
571 			}
572 
573 		}
574 	} else if (tcph->syn &&
575 		   after(end, sender->td_end) &&
576 		   (state->state == TCP_CONNTRACK_SYN_SENT ||
577 		    state->state == TCP_CONNTRACK_SYN_RECV)) {
578 		/*
579 		 * RFC 793: "if a TCP is reinitialized ... then it need
580 		 * not wait at all; it must only be sure to use sequence
581 		 * numbers larger than those recently used."
582 		 *
583 		 * Re-init state for this direction, just like for the first
584 		 * syn(-ack) reply, it might differ in seq, ack or tcp options.
585 		 */
586 		tcp_init_sender(sender, receiver,
587 				skb, dataoff, tcph,
588 				end, win);
589 
590 		if (dir == IP_CT_DIR_REPLY && !tcph->ack)
591 			return NFCT_TCP_ACCEPT;
592 	}
593 
594 	if (!(tcph->ack)) {
595 		/*
596 		 * If there is no ACK, just pretend it was set and OK.
597 		 */
598 		ack = sack = receiver->td_end;
599 	} else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
600 		    (TCP_FLAG_ACK|TCP_FLAG_RST))
601 		   && (ack == 0)) {
602 		/*
603 		 * Broken TCP stacks that set the ACK flag in RST packets,
604 		 * but with a zero ack value.
605 		 */
606 		ack = sack = receiver->td_end;
607 	}
608 
609 	if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
610 		/*
611 		 * RST sent answering SYN.
612 		 */
613 		seq = end = sender->td_end;
614 
615 	seq_ok = before(seq, sender->td_maxend + 1);
616 	if (!seq_ok) {
617 		u32 overshot = end - sender->td_maxend + 1;
618 		bool ack_ok;
619 
620 		ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1);
621 		in_recv_win = receiver->td_maxwin &&
622 			      after(end, sender->td_end - receiver->td_maxwin - 1);
623 
624 		if (in_recv_win &&
625 		    ack_ok &&
626 		    overshot <= receiver->td_maxwin &&
627 		    before(sack, receiver->td_end + 1)) {
628 			/* Work around TCPs that send more bytes than allowed by
629 			 * the receive window.
630 			 *
631 			 * If the (marked as invalid) packet is allowed to pass by
632 			 * the ruleset and the peer acks this data, then it's possible
633 			 * that all future packets will trigger the 'ACK is over upper bound' check.
634 			 *
635 			 * Thus, if only the sequence check fails, update td_end so that
636 			 * a possible ACK for this data can update the internal state.
637 			 */
638 			sender->td_end = end;
639 			sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
640 
641 			return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
642 						  "%u bytes more than expected", overshot);
643 		}
644 
645 		return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
646 					  "SEQ is over upper bound %u (over the window of the receiver)",
647 					  sender->td_maxend + 1);
648 	}
649 
650 	if (!before(sack, receiver->td_end + 1))
651 		return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
652 					  "ACK is over upper bound %u (ACKed data not seen yet)",
653 					  receiver->td_end + 1);
654 
655 	/* Is the ending sequence in the receive window (if available)? */
656 	in_recv_win = !receiver->td_maxwin ||
657 		      after(end, sender->td_end - receiver->td_maxwin - 1);
658 	if (!in_recv_win)
659 		return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
660 					  "SEQ is under lower bound %u (already ACKed data retransmitted)",
661 					  sender->td_end - receiver->td_maxwin - 1);
662 	if (!after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1))
663 		return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
664 					  "ignored ACK under lower bound %u (possibly overly delayed)",
665 					  receiver->td_end - MAXACKWINDOW(sender) - 1);
666 
667 	/* Take into account window scaling (RFC 1323). */
668 	if (!tcph->syn)
669 		win <<= sender->td_scale;
670 
671 	/* Update sender data. */
672 	swin = win + (sack - ack);
673 	if (sender->td_maxwin < swin)
674 		sender->td_maxwin = swin;
675 	if (after(end, sender->td_end)) {
676 		sender->td_end = end;
677 		sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
678 	}
679 	if (tcph->ack) {
680 		if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
681 			sender->td_maxack = ack;
682 			sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
683 		} else if (after(ack, sender->td_maxack)) {
684 			sender->td_maxack = ack;
685 		}
686 	}
687 
688 	/* Update receiver data. */
689 	if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
690 		receiver->td_maxwin += end - sender->td_maxend;
691 	if (after(sack + win, receiver->td_maxend - 1)) {
692 		receiver->td_maxend = sack + win;
693 		if (win == 0)
694 			receiver->td_maxend++;
695 	}
696 	if (ack == receiver->td_end)
697 		receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
698 
699 	/* Check retransmissions. */
700 	if (index == TCP_ACK_SET) {
701 		if (state->last_dir == dir &&
702 		    state->last_seq == seq &&
703 		    state->last_ack == ack &&
704 		    state->last_end == end &&
705 		    state->last_win == win_raw) {
706 			state->retrans++;
707 		} else {
708 			state->last_dir = dir;
709 			state->last_seq = seq;
710 			state->last_ack = ack;
711 			state->last_end = end;
712 			state->last_win = win_raw;
713 			state->retrans = 0;
714 		}
715 	}
716 
717 	return NFCT_TCP_ACCEPT;
718 }
719 
720 static void __cold nf_tcp_handle_invalid(struct nf_conn *ct,
721 					 enum ip_conntrack_dir dir,
722 					 int index,
723 					 const struct sk_buff *skb,
724 					 const struct nf_hook_state *hook_state)
725 {
726 	const unsigned int *timeouts;
727 	const struct nf_tcp_net *tn;
728 	unsigned int timeout;
729 	u32 expires;
730 
731 	if (!test_bit(IPS_ASSURED_BIT, &ct->status) ||
732 	    test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
733 		return;
734 
735 	/* We don't want to have connections hanging around in ESTABLISHED
736 	 * state for a long time 'just because' conntrack deemed a FIN/RST
737 	 * out-of-window.
738 	 *
739 	 * Shrink the timeout just like when there is unacked data.
740 	 * This speeds up eviction of 'dead' connections where the
741 	 * connection and conntracks internal state are out of sync.
742 	 */
743 	switch (index) {
744 	case TCP_RST_SET:
745 	case TCP_FIN_SET:
746 		break;
747 	default:
748 		return;
749 	}
750 
751 	if (ct->proto.tcp.last_dir != dir &&
752 	    (ct->proto.tcp.last_index == TCP_FIN_SET ||
753 	     ct->proto.tcp.last_index == TCP_RST_SET)) {
754 		expires = nf_ct_expires(ct);
755 		if (expires < 120 * HZ)
756 			return;
757 
758 		tn = nf_tcp_pernet(nf_ct_net(ct));
759 		timeouts = nf_ct_timeout_lookup(ct);
760 		if (!timeouts)
761 			timeouts = tn->timeouts;
762 
763 		timeout = READ_ONCE(timeouts[TCP_CONNTRACK_UNACK]);
764 		if (expires > timeout) {
765 			nf_ct_l4proto_log_invalid(skb, ct, hook_state,
766 					  "packet (index %d, dir %d) response for index %d lower timeout to %u",
767 					  index, dir, ct->proto.tcp.last_index, timeout);
768 
769 			WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
770 		}
771 	} else {
772 		ct->proto.tcp.last_index = index;
773 		ct->proto.tcp.last_dir = dir;
774 	}
775 }
776 
777 /* table of valid flag combinations - PUSH, ECE and CWR are always valid */
778 static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
779 				 TCPHDR_URG) + 1] =
780 {
781 	[TCPHDR_SYN]				= 1,
782 	[TCPHDR_SYN|TCPHDR_URG]			= 1,
783 	[TCPHDR_SYN|TCPHDR_ACK]			= 1,
784 	[TCPHDR_RST]				= 1,
785 	[TCPHDR_RST|TCPHDR_ACK]			= 1,
786 	[TCPHDR_FIN|TCPHDR_ACK]			= 1,
787 	[TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG]	= 1,
788 	[TCPHDR_ACK]				= 1,
789 	[TCPHDR_ACK|TCPHDR_URG]			= 1,
790 };
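/*
 * Usage sketch (illustrative only, excluded from the build): tcp_error()
 * below masks out PSH/ECE/CWR before the lookup, so SYN|PSH reduces to
 * SYN and is accepted, while SYN|FIN has no entry above and is rejected:
 */
#if 0
	u8 tcpflags = (TCPHDR_SYN | TCPHDR_FIN) &
		      ~(TCPHDR_ECE | TCPHDR_CWR | TCPHDR_PSH);
	/* tcp_valid_flags[tcpflags] == 0: invalid flag combination */
#endif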
791 
792 static void tcp_error_log(const struct sk_buff *skb,
793 			  const struct nf_hook_state *state,
794 			  const char *msg)
795 {
796 	nf_l4proto_log_invalid(skb, state, IPPROTO_TCP, "%s", msg);
797 }
798 
799 /* Protect conntrack against broken packets. Code taken from ipt_unclean.c.  */
800 static bool tcp_error(const struct tcphdr *th,
801 		      struct sk_buff *skb,
802 		      unsigned int dataoff,
803 		      const struct nf_hook_state *state)
804 {
805 	unsigned int tcplen = skb->len - dataoff;
806 	u8 tcpflags;
807 
808 	/* Not whole TCP header or malformed packet */
809 	if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
810 		tcp_error_log(skb, state, "truncated packet");
811 		return true;
812 	}
813 
814 	/* Checksum invalid? Ignore.
815 	 * We skip checking packets on the outgoing path
816 	 * because the checksum is assumed to be correct.
817 	 */
818 	/* FIXME: Source route IP option packets --RR */
819 	if (state->net->ct.sysctl_checksum &&
820 	    state->hook == NF_INET_PRE_ROUTING &&
821 	    nf_checksum(skb, state->hook, dataoff, IPPROTO_TCP, state->pf)) {
822 		tcp_error_log(skb, state, "bad checksum");
823 		return true;
824 	}
825 
826 	/* Check TCP flags. */
827 	tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
828 	if (!tcp_valid_flags[tcpflags]) {
829 		tcp_error_log(skb, state, "invalid tcp flag combination");
830 		return true;
831 	}
832 
833 	return false;
834 }
835 
836 static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
837 			     unsigned int dataoff,
838 			     const struct tcphdr *th)
839 {
840 	enum tcp_conntrack new_state;
841 	struct net *net = nf_ct_net(ct);
842 	const struct nf_tcp_net *tn = nf_tcp_pernet(net);
843 
844 	/* Don't need lock here: this conntrack not in circulation yet */
845 	new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
846 
847 	/* Invalid: delete conntrack */
848 	if (new_state >= TCP_CONNTRACK_MAX) {
849 		pr_debug("nf_ct_tcp: invalid new deleting.\n");
850 		return false;
851 	}
852 
853 	if (new_state == TCP_CONNTRACK_SYN_SENT) {
854 		memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
855 		/* SYN packet */
856 		ct->proto.tcp.seen[0].td_end =
857 			segment_seq_plus_len(ntohl(th->seq), skb->len,
858 					     dataoff, th);
859 		ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
860 		if (ct->proto.tcp.seen[0].td_maxwin == 0)
861 			ct->proto.tcp.seen[0].td_maxwin = 1;
862 		ct->proto.tcp.seen[0].td_maxend =
863 			ct->proto.tcp.seen[0].td_end;
864 
865 		tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
866 	} else if (tn->tcp_loose == 0) {
867 		/* Don't try to pick up connections. */
868 		return false;
869 	} else {
870 		memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
871 		/*
872 		 * We are in the middle of a connection,
873 		 * its history is lost for us.
874 		 * Let's try to use the data from the packet.
875 		 */
876 		ct->proto.tcp.seen[0].td_end =
877 			segment_seq_plus_len(ntohl(th->seq), skb->len,
878 					     dataoff, th);
879 		ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
880 		if (ct->proto.tcp.seen[0].td_maxwin == 0)
881 			ct->proto.tcp.seen[0].td_maxwin = 1;
882 		ct->proto.tcp.seen[0].td_maxend =
883 			ct->proto.tcp.seen[0].td_end +
884 			ct->proto.tcp.seen[0].td_maxwin;
885 
886 		/* We assume SACK and liberal window checking to handle
887 		 * window scaling */
888 		ct->proto.tcp.seen[0].flags =
889 		ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
890 					      IP_CT_TCP_FLAG_BE_LIBERAL;
891 	}
892 
893 	/* tcp_packet will set them */
894 	ct->proto.tcp.last_index = TCP_NONE_SET;
895 	return true;
896 }
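/* Illustration of the pickup path above: with tcp_loose enabled, a
 * mid-stream ACK may create this entry; both directions are then flagged
 * SACK_PERM + BE_LIBERAL so that tcp_in_window() tolerates the unknown
 * window scale until the real state is learned.  With tcp_loose == 0
 * such a packet fails tcp_new() and no conntrack is created.
 */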
897 
898 static bool tcp_can_early_drop(const struct nf_conn *ct)
899 {
900 	switch (ct->proto.tcp.state) {
901 	case TCP_CONNTRACK_FIN_WAIT:
902 	case TCP_CONNTRACK_LAST_ACK:
903 	case TCP_CONNTRACK_TIME_WAIT:
904 	case TCP_CONNTRACK_CLOSE:
905 	case TCP_CONNTRACK_CLOSE_WAIT:
906 		return true;
907 	default:
908 		break;
909 	}
910 
911 	return false;
912 }
913 
914 void nf_conntrack_tcp_set_closing(struct nf_conn *ct)
915 {
916 	enum tcp_conntrack old_state;
917 	const unsigned int *timeouts;
918 	u32 timeout;
919 
920 	if (!nf_ct_is_confirmed(ct))
921 		return;
922 
923 	spin_lock_bh(&ct->lock);
924 	old_state = ct->proto.tcp.state;
925 	ct->proto.tcp.state = TCP_CONNTRACK_CLOSE;
926 
927 	if (old_state == TCP_CONNTRACK_CLOSE ||
928 	    test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
929 		spin_unlock_bh(&ct->lock);
930 		return;
931 	}
932 
933 	timeouts = nf_ct_timeout_lookup(ct);
934 	if (!timeouts) {
935 		const struct nf_tcp_net *tn;
936 
937 		tn = nf_tcp_pernet(nf_ct_net(ct));
938 		timeouts = tn->timeouts;
939 	}
940 
941 	timeout = timeouts[TCP_CONNTRACK_CLOSE];
942 	WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
943 
944 	spin_unlock_bh(&ct->lock);
945 
946 	nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
947 }
948 
949 static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state)
950 {
951 	state->td_end		= 0;
952 	state->td_maxend	= 0;
953 	state->td_maxwin	= 0;
954 	state->td_maxack	= 0;
955 	state->td_scale		= 0;
956 	state->flags		&= IP_CT_TCP_FLAG_BE_LIBERAL;
957 }
958 
959 /* Returns verdict for packet, or -1 for invalid. */
960 int nf_conntrack_tcp_packet(struct nf_conn *ct,
961 			    struct sk_buff *skb,
962 			    unsigned int dataoff,
963 			    enum ip_conntrack_info ctinfo,
964 			    const struct nf_hook_state *state)
965 {
966 	struct net *net = nf_ct_net(ct);
967 	struct nf_tcp_net *tn = nf_tcp_pernet(net);
968 	enum tcp_conntrack new_state, old_state;
969 	unsigned int index, *timeouts;
970 	enum nf_ct_tcp_action res;
971 	enum ip_conntrack_dir dir;
972 	const struct tcphdr *th;
973 	struct tcphdr _tcph;
974 	unsigned long timeout;
975 
976 	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
977 	if (th == NULL)
978 		return -NF_ACCEPT;
979 
980 	if (tcp_error(th, skb, dataoff, state))
981 		return -NF_ACCEPT;
982 
983 	if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th))
984 		return -NF_ACCEPT;
985 
986 	spin_lock_bh(&ct->lock);
987 	old_state = ct->proto.tcp.state;
988 	dir = CTINFO2DIR(ctinfo);
989 	index = get_conntrack_index(th);
990 	new_state = tcp_conntracks[dir][index][old_state];
991 
992 	switch (new_state) {
993 	case TCP_CONNTRACK_SYN_SENT:
994 		if (old_state < TCP_CONNTRACK_TIME_WAIT)
995 			break;
996 		/* RFC 1122: "When a connection is closed actively,
997 		 * it MUST linger in TIME-WAIT state for a time 2xMSL
998 		 * (Maximum Segment Lifetime). However, it MAY accept
999 		 * a new SYN from the remote TCP to reopen the connection
1000 		 * directly from TIME-WAIT state, if..."
1001 		 * We ignore the conditions because we are in the
1002 		 * TIME-WAIT state anyway.
1003 		 *
1004 		 * Handle aborted connections: we and the server
1005 		 * think there is an existing connection but the client
1006 		 * aborts it and starts a new one.
1007 		 */
1008 		if (((ct->proto.tcp.seen[dir].flags
1009 		      | ct->proto.tcp.seen[!dir].flags)
1010 		     & IP_CT_TCP_FLAG_CLOSE_INIT)
1011 		    || (ct->proto.tcp.last_dir == dir
1012 		        && ct->proto.tcp.last_index == TCP_RST_SET)) {
1013 			/* Attempt to reopen a closed/aborted connection.
1014 			 * Delete this connection and look up again. */
1015 			spin_unlock_bh(&ct->lock);
1016 
1017 			/* Only repeat if we can actually remove the timer.
1018 			 * Destruction may already be in progress in process
1019 			 * context and we must give it a chance to terminate.
1020 			 */
1021 			if (nf_ct_kill(ct))
1022 				return -NF_REPEAT;
1023 			return NF_DROP;
1024 		}
1025 		fallthrough;
1026 	case TCP_CONNTRACK_IGNORE:
1027 		/* Ignored packets:
1028 		 *
1029 		 * Our connection entry may be out of sync, so ignore
1030 		 * packets which may signal the real connection between
1031 		 * the client and the server.
1032 		 *
1033 		 * a) SYN in ORIGINAL
1034 		 * b) SYN/ACK in REPLY
1035 		 * c) ACK in reply direction after initial SYN in original.
1036 		 *
1037 		 * If the ignored packet is invalid, the receiver will send
1038 		 * a RST we'll catch below.
1039 		 */
1040 		if (index == TCP_SYNACK_SET
1041 		    && ct->proto.tcp.last_index == TCP_SYN_SET
1042 		    && ct->proto.tcp.last_dir != dir
1043 		    && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
1044 			/* b) This SYN/ACK acknowledges a SYN that we earlier
1045 			 * ignored as invalid. This means that the client and
1046 			 * the server are both in sync, while the firewall is
1047 			 * not. We get in sync from the previously annotated
1048 			 * values.
1049 			 */
1050 			old_state = TCP_CONNTRACK_SYN_SENT;
1051 			new_state = TCP_CONNTRACK_SYN_RECV;
1052 			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end =
1053 				ct->proto.tcp.last_end;
1054 			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend =
1055 				ct->proto.tcp.last_end;
1056 			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin =
1057 				ct->proto.tcp.last_win == 0 ?
1058 					1 : ct->proto.tcp.last_win;
1059 			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
1060 				ct->proto.tcp.last_wscale;
1061 			ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
1062 			ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
1063 				ct->proto.tcp.last_flags;
1064 			nf_ct_tcp_state_reset(&ct->proto.tcp.seen[dir]);
1065 			break;
1066 		}
1067 		ct->proto.tcp.last_index = index;
1068 		ct->proto.tcp.last_dir = dir;
1069 		ct->proto.tcp.last_seq = ntohl(th->seq);
1070 		ct->proto.tcp.last_end =
1071 		    segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
1072 		ct->proto.tcp.last_win = ntohs(th->window);
1073 
1074 		/* a) This is a SYN in ORIGINAL. The client and the server
1075 		 * may be in sync but we are not. In that case, we annotate
1076 		 * the TCP options and let the packet go through. If it is a
1077 		 * valid SYN packet, the server will reply with a SYN/ACK, and
1078 		 * then we'll get in sync. Otherwise, the server potentially
1079 		 * responds with a challenge ACK if implementing RFC5961.
1080 		 */
1081 		if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
1082 			struct ip_ct_tcp_state seen = {};
1083 
1084 			ct->proto.tcp.last_flags =
1085 			ct->proto.tcp.last_wscale = 0;
1086 			tcp_options(skb, dataoff, th, &seen);
1087 			if (seen.flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
1088 				ct->proto.tcp.last_flags |=
1089 					IP_CT_TCP_FLAG_WINDOW_SCALE;
1090 				ct->proto.tcp.last_wscale = seen.td_scale;
1091 			}
1092 			if (seen.flags & IP_CT_TCP_FLAG_SACK_PERM) {
1093 				ct->proto.tcp.last_flags |=
1094 					IP_CT_TCP_FLAG_SACK_PERM;
1095 			}
1096 			/* Mark the potential for an RFC5961 challenge ACK;
1097 			 * this poses a special problem for the LAST_ACK state,
1098 			 * as such an ACK is interpreted as ACKing the last FIN.
1099 			 */
1100 			if (old_state == TCP_CONNTRACK_LAST_ACK)
1101 				ct->proto.tcp.last_flags |=
1102 					IP_CT_EXP_CHALLENGE_ACK;
1103 		}
1104 
1105 		/* possible challenge ack reply to syn */
1106 		if (old_state == TCP_CONNTRACK_SYN_SENT &&
1107 		    index == TCP_ACK_SET &&
1108 		    dir == IP_CT_DIR_REPLY)
1109 			ct->proto.tcp.last_ack = ntohl(th->ack_seq);
1110 
1111 		spin_unlock_bh(&ct->lock);
1112 		nf_ct_l4proto_log_invalid(skb, ct, state,
1113 					  "packet (index %d) in dir %d ignored, state %s",
1114 					  index, dir,
1115 					  tcp_conntrack_names[old_state]);
1116 		return NF_ACCEPT;
1117 	case TCP_CONNTRACK_MAX:
1118 		/* Special case for SYN proxy: when the SYN to the server or
1119 		 * the SYN/ACK from the server is lost, the client may transmit
1120 		 * a keep-alive packet while in SYN_SENT state. This needs to
1121 		 * be associated with the original conntrack entry in order to
1122 		 * generate a new SYN with the correct sequence number.
1123 		 */
1124 		if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT &&
1125 		    index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL &&
1126 		    ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL &&
1127 		    ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) {
1128 			pr_debug("nf_ct_tcp: SYN proxy client keep alive\n");
1129 			spin_unlock_bh(&ct->lock);
1130 			return NF_ACCEPT;
1131 		}
1132 
1133 		/* Invalid packet */
1134 		spin_unlock_bh(&ct->lock);
1135 		nf_ct_l4proto_log_invalid(skb, ct, state,
1136 					  "packet (index %d) in dir %d invalid, state %s",
1137 					  index, dir,
1138 					  tcp_conntrack_names[old_state]);
1139 		return -NF_ACCEPT;
1140 	case TCP_CONNTRACK_TIME_WAIT:
1141 		/* RFC5961 compliance causes stacks to send a "challenge ACK",
1142 		 * e.g. in response to spurious SYNs.  Conntrack MUST NOT
1143 		 * believe this ACK is acking the last FIN.
1144 		 */
1145 		if (old_state == TCP_CONNTRACK_LAST_ACK &&
1146 		    index == TCP_ACK_SET &&
1147 		    ct->proto.tcp.last_dir != dir &&
1148 		    ct->proto.tcp.last_index == TCP_SYN_SET &&
1149 		    (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
1150 			/* Detected RFC5961 challenge ACK */
1151 			ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
1152 			spin_unlock_bh(&ct->lock);
1153 			nf_ct_l4proto_log_invalid(skb, ct, state, "challenge-ack ignored");
1154 			return NF_ACCEPT; /* Don't change state */
1155 		}
1156 		break;
1157 	case TCP_CONNTRACK_SYN_SENT2:
1158 		/* tcp_conntracks table is not smart enough to handle
1159 		 * simultaneous open.
1160 		 */
1161 		ct->proto.tcp.last_flags |= IP_CT_TCP_SIMULTANEOUS_OPEN;
1162 		break;
1163 	case TCP_CONNTRACK_SYN_RECV:
1164 		if (dir == IP_CT_DIR_REPLY && index == TCP_ACK_SET &&
1165 		    ct->proto.tcp.last_flags & IP_CT_TCP_SIMULTANEOUS_OPEN)
1166 			new_state = TCP_CONNTRACK_ESTABLISHED;
1167 		break;
1168 	case TCP_CONNTRACK_CLOSE:
1169 		if (index != TCP_RST_SET)
1170 			break;
1171 
1172 		/* If we are closing, tuple might have been re-used already.
1173 		 * last_index, last_ack, and all other ct fields used for
1174 		 * sequence/window validation are outdated in that case.
1175 		 *
1176 		 * As the conntrack can already be expired by GC under pressure,
1177 		 * just skip validation checks.
1178 		 */
1179 		if (tcp_can_early_drop(ct))
1180 			goto in_window;
1181 
1182 		/* td_maxack might be outdated if we let a SYN through earlier */
1183 		if ((ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) &&
1184 		    ct->proto.tcp.last_index != TCP_SYN_SET) {
1185 			u32 seq = ntohl(th->seq);
1186 
1187 			/* If we are not in established state and SEQ=0 this is most
1188 			 * likely an answer to a SYN we let go through above (last_index
1189 			 * can be updated due to out-of-order ACKs).
1190 			 */
1191 			if (seq == 0 && !nf_conntrack_tcp_established(ct))
1192 				break;
1193 
1194 			if (before(seq, ct->proto.tcp.seen[!dir].td_maxack) &&
1195 			    !tn->tcp_ignore_invalid_rst) {
1196 				/* Invalid RST  */
1197 				spin_unlock_bh(&ct->lock);
1198 				nf_ct_l4proto_log_invalid(skb, ct, state, "invalid rst");
1199 				return -NF_ACCEPT;
1200 			}
1201 
1202 			if (!nf_conntrack_tcp_established(ct) ||
1203 			    seq == ct->proto.tcp.seen[!dir].td_maxack)
1204 				break;
1205 
1206 			/* Check if rst is part of a train, such as
1207 			 *   foo:80 > bar:4379: P, 235946583:235946602(19) ack 42
1208 			 *   foo:80 > bar:4379: R, 235946602:235946602(0)  ack 42
1209 			 */
1210 			if (ct->proto.tcp.last_index == TCP_ACK_SET &&
1211 			    ct->proto.tcp.last_dir == dir &&
1212 			    seq == ct->proto.tcp.last_end)
1213 				break;
1214 
1215 			/* ... RST sequence number doesn't match exactly, keep
1216 			 * established state to allow a possible challenge ACK.
1217 			 */
1218 			new_state = old_state;
1219 		}
1220 		if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
1221 			 && ct->proto.tcp.last_index == TCP_SYN_SET)
1222 			|| (!test_bit(IPS_ASSURED_BIT, &ct->status)
1223 			    && ct->proto.tcp.last_index == TCP_ACK_SET))
1224 		    && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
1225 			/* RST sent to invalid SYN or ACK we had let through
1226 			 * at a) and c) above:
1227 			 *
1228 			 * a) SYN was in window then
1229 			 * c) we hold a half-open connection.
1230 			 *
1231 			 * Delete our connection entry.
1232 			 * We skip window checking, because packet might ACK
1233 			 * segments we ignored. */
1234 			goto in_window;
1235 		}
1236 
1237 		/* Reset in response to a challenge-ack we let through earlier */
1238 		if (old_state == TCP_CONNTRACK_SYN_SENT &&
1239 		    ct->proto.tcp.last_index == TCP_ACK_SET &&
1240 		    ct->proto.tcp.last_dir == IP_CT_DIR_REPLY &&
1241 		    ntohl(th->seq) == ct->proto.tcp.last_ack)
1242 			goto in_window;
1243 
1244 		break;
1245 	default:
1246 		/* Keep compilers happy. */
1247 		break;
1248 	}
1249 
1250 	res = tcp_in_window(ct, dir, index,
1251 			    skb, dataoff, th, state);
1252 	switch (res) {
1253 	case NFCT_TCP_IGNORE:
1254 		spin_unlock_bh(&ct->lock);
1255 		return NF_ACCEPT;
1256 	case NFCT_TCP_INVALID:
1257 		nf_tcp_handle_invalid(ct, dir, index, skb, state);
1258 		spin_unlock_bh(&ct->lock);
1259 		return -NF_ACCEPT;
1260 	case NFCT_TCP_ACCEPT:
1261 		break;
1262 	}
1263      in_window:
1264 	/* From now on we have got in-window packets */
1265 	ct->proto.tcp.last_index = index;
1266 	ct->proto.tcp.last_dir = dir;
1267 
1268 	ct->proto.tcp.state = new_state;
1269 	if (old_state != new_state
1270 	    && new_state == TCP_CONNTRACK_FIN_WAIT)
1271 		ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
1272 
1273 	timeouts = nf_ct_timeout_lookup(ct);
1274 	if (!timeouts)
1275 		timeouts = tn->timeouts;
1276 
1277 	if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
1278 	    timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1279 		timeout = timeouts[TCP_CONNTRACK_RETRANS];
1280 	else if (unlikely(index == TCP_RST_SET))
1281 		timeout = timeouts[TCP_CONNTRACK_CLOSE];
1282 	else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
1283 		 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
1284 		 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
1285 		timeout = timeouts[TCP_CONNTRACK_UNACK];
1286 	else if (ct->proto.tcp.last_win == 0 &&
1287 		 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1288 		timeout = timeouts[TCP_CONNTRACK_RETRANS];
1289 	else
1290 		timeout = timeouts[new_state];
1291 	spin_unlock_bh(&ct->lock);
1292 
1293 	if (new_state != old_state)
1294 		nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
1295 
1296 	if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1297 		/* If only reply is a RST, we can consider ourselves not to
1298 		   have an established connection: this is a fairly common
1299 		   problem case, so we can delete the conntrack
1300 		   immediately.  --RR */
1301 		if (th->rst) {
1302 			nf_ct_kill_acct(ct, ctinfo, skb);
1303 			return NF_ACCEPT;
1304 		}
1305 
1306 		if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
1307 			/* do not renew timeout on SYN retransmit.
1308 			 *
1309 			 * Else port reuse by client or NAT middlebox can keep
1310 			 * entry alive indefinitely (including nat info).
1311 			 */
1312 			return NF_ACCEPT;
1313 		}
1314 
1315 		/* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
1316 		 * pickup with loose=1. Avoid large ESTABLISHED timeout.
1317 		 */
1318 		if (new_state == TCP_CONNTRACK_ESTABLISHED &&
1319 		    timeout > timeouts[TCP_CONNTRACK_UNACK])
1320 			timeout = timeouts[TCP_CONNTRACK_UNACK];
1321 	} else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
1322 		   && (old_state == TCP_CONNTRACK_SYN_RECV
1323 		       || old_state == TCP_CONNTRACK_ESTABLISHED)
1324 		   && new_state == TCP_CONNTRACK_ESTABLISHED) {
1325 		/* Set ASSURED if we see valid ack in ESTABLISHED
1326 		   after SYN_RECV or a valid answer for a picked up
1327 		   connection. */
1328 		set_bit(IPS_ASSURED_BIT, &ct->status);
1329 		nf_conntrack_event_cache(IPCT_ASSURED, ct);
1330 	}
1331 	nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
1332 
1333 	return NF_ACCEPT;
1334 }
1335 
1336 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1337 
1338 #include <linux/netfilter/nfnetlink.h>
1339 #include <linux/netfilter/nfnetlink_conntrack.h>
1340 
1341 static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
1342 			 struct nf_conn *ct, bool destroy)
1343 {
1344 	struct nlattr *nest_parms;
1345 	struct nf_ct_tcp_flags tmp = {};
1346 
1347 	spin_lock_bh(&ct->lock);
1348 	nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP);
1349 	if (!nest_parms)
1350 		goto nla_put_failure;
1351 
1352 	if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state))
1353 		goto nla_put_failure;
1354 
1355 	if (destroy)
1356 		goto skip_state;
1357 
1358 	if (nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
1359 		       ct->proto.tcp.seen[0].td_scale) ||
1360 	    nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
1361 		       ct->proto.tcp.seen[1].td_scale))
1362 		goto nla_put_failure;
1363 
1364 	tmp.flags = ct->proto.tcp.seen[0].flags;
1365 	if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
1366 		    sizeof(struct nf_ct_tcp_flags), &tmp))
1367 		goto nla_put_failure;
1368 
1369 	tmp.flags = ct->proto.tcp.seen[1].flags;
1370 	if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
1371 		    sizeof(struct nf_ct_tcp_flags), &tmp))
1372 		goto nla_put_failure;
1373 skip_state:
1374 	spin_unlock_bh(&ct->lock);
1375 	nla_nest_end(skb, nest_parms);
1376 
1377 	return 0;
1378 
1379 nla_put_failure:
1380 	spin_unlock_bh(&ct->lock);
1381 	return -1;
1382 }
1383 
1384 static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
1385 	[CTA_PROTOINFO_TCP_STATE]	    = { .type = NLA_U8 },
1386 	[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = { .type = NLA_U8 },
1387 	[CTA_PROTOINFO_TCP_WSCALE_REPLY]    = { .type = NLA_U8 },
1388 	[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]  = { .len = sizeof(struct nf_ct_tcp_flags) },
1389 	[CTA_PROTOINFO_TCP_FLAGS_REPLY]	    = { .len = sizeof(struct nf_ct_tcp_flags) },
1390 };
1391 
1392 #define TCP_NLATTR_SIZE	( \
1393 	NLA_ALIGN(NLA_HDRLEN + 1) + \
1394 	NLA_ALIGN(NLA_HDRLEN + 1) + \
1395 	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
1396 	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
1397 
1398 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
1399 {
1400 	struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
1401 	struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
1402 	int err;
1403 
1404 	/* updates may not contain anything about the private
1405 	 * protocol info; in that case skip the parsing */
1406 	if (!pattr)
1407 		return 0;
1408 
1409 	err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_TCP_MAX, pattr,
1410 					  tcp_nla_policy, NULL);
1411 	if (err < 0)
1412 		return err;
1413 
1414 	if (tb[CTA_PROTOINFO_TCP_STATE] &&
1415 	    nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
1416 		return -EINVAL;
1417 
1418 	spin_lock_bh(&ct->lock);
1419 	if (tb[CTA_PROTOINFO_TCP_STATE])
1420 		ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
1421 
1422 	if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
1423 		struct nf_ct_tcp_flags *attr =
1424 			nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);
1425 		ct->proto.tcp.seen[0].flags &= ~attr->mask;
1426 		ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask;
1427 	}
1428 
1429 	if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {
1430 		struct nf_ct_tcp_flags *attr =
1431 			nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);
1432 		ct->proto.tcp.seen[1].flags &= ~attr->mask;
1433 		ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask;
1434 	}
1435 
1436 	if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] &&
1437 	    tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] &&
1438 	    ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
1439 	    ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
1440 		ct->proto.tcp.seen[0].td_scale =
1441 			nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
1442 		ct->proto.tcp.seen[1].td_scale =
1443 			nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
1444 	}
1445 	spin_unlock_bh(&ct->lock);
1446 
1447 	return 0;
1448 }
1449 
1450 static unsigned int tcp_nlattr_tuple_size(void)
1451 {
1452 	static unsigned int size __read_mostly;
1453 
1454 	if (!size)
1455 		size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1456 
1457 	return size;
1458 }
1459 #endif
1460 
1461 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1462 
1463 #include <linux/netfilter/nfnetlink.h>
1464 #include <linux/netfilter/nfnetlink_cttimeout.h>
1465 
1466 static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
1467 				     struct net *net, void *data)
1468 {
1469 	struct nf_tcp_net *tn = nf_tcp_pernet(net);
1470 	unsigned int *timeouts = data;
1471 	int i;
1472 
1473 	if (!timeouts)
1474 		timeouts = tn->timeouts;
1475 	/* set default TCP timeouts. */
1476 	for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
1477 		timeouts[i] = tn->timeouts[i];
1478 
1479 	if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
1480 		timeouts[TCP_CONNTRACK_SYN_SENT] =
1481 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
1482 	}
1483 
1484 	if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
1485 		timeouts[TCP_CONNTRACK_SYN_RECV] =
1486 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
1487 	}
1488 	if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
1489 		timeouts[TCP_CONNTRACK_ESTABLISHED] =
1490 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
1491 	}
1492 	if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
1493 		timeouts[TCP_CONNTRACK_FIN_WAIT] =
1494 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
1495 	}
1496 	if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
1497 		timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
1498 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
1499 	}
1500 	if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
1501 		timeouts[TCP_CONNTRACK_LAST_ACK] =
1502 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
1503 	}
1504 	if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
1505 		timeouts[TCP_CONNTRACK_TIME_WAIT] =
1506 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
1507 	}
1508 	if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
1509 		timeouts[TCP_CONNTRACK_CLOSE] =
1510 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
1511 	}
1512 	if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
1513 		timeouts[TCP_CONNTRACK_SYN_SENT2] =
1514 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
1515 	}
1516 	if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
1517 		timeouts[TCP_CONNTRACK_RETRANS] =
1518 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
1519 	}
1520 	if (tb[CTA_TIMEOUT_TCP_UNACK]) {
1521 		timeouts[TCP_CONNTRACK_UNACK] =
1522 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
1523 	}
1524 
1525 	timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
1526 	return 0;
1527 }
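/* For illustration: the CTA_TIMEOUT_TCP_* attributes carry seconds in
 * network byte order and are converted to jiffies above, so a cttimeout
 * policy setting CTA_TIMEOUT_TCP_SYN_SENT to 90 yields
 * timeouts[TCP_CONNTRACK_SYN_SENT] == 90 * HZ.
 */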
1528 
1529 static int
1530 tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
1531 {
1532 	const unsigned int *timeouts = data;
1533 
1534 	if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
1535 			htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
1536 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
1537 			 htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
1538 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
1539 			 htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
1540 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
1541 			 htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
1542 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
1543 			 htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
1544 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
1545 			 htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
1546 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
1547 			 htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
1548 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
1549 			 htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
1550 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
1551 			 htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
1552 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
1553 			 htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
1554 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
1555 			 htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
1556 		goto nla_put_failure;
1557 	return 0;
1558 
1559 nla_put_failure:
1560 	return -ENOSPC;
1561 }
1562 
1563 static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
1564 	[CTA_TIMEOUT_TCP_SYN_SENT]	= { .type = NLA_U32 },
1565 	[CTA_TIMEOUT_TCP_SYN_RECV]	= { .type = NLA_U32 },
1566 	[CTA_TIMEOUT_TCP_ESTABLISHED]	= { .type = NLA_U32 },
1567 	[CTA_TIMEOUT_TCP_FIN_WAIT]	= { .type = NLA_U32 },
1568 	[CTA_TIMEOUT_TCP_CLOSE_WAIT]	= { .type = NLA_U32 },
1569 	[CTA_TIMEOUT_TCP_LAST_ACK]	= { .type = NLA_U32 },
1570 	[CTA_TIMEOUT_TCP_TIME_WAIT]	= { .type = NLA_U32 },
1571 	[CTA_TIMEOUT_TCP_CLOSE]		= { .type = NLA_U32 },
1572 	[CTA_TIMEOUT_TCP_SYN_SENT2]	= { .type = NLA_U32 },
1573 	[CTA_TIMEOUT_TCP_RETRANS]	= { .type = NLA_U32 },
1574 	[CTA_TIMEOUT_TCP_UNACK]		= { .type = NLA_U32 },
1575 };
1576 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
1577 
1578 void nf_conntrack_tcp_init_net(struct net *net)
1579 {
1580 	struct nf_tcp_net *tn = nf_tcp_pernet(net);
1581 	int i;
1582 
1583 	for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
1584 		tn->timeouts[i] = tcp_timeouts[i];
1585 
1586 	/* timeouts[0] is unused, make it same as SYN_SENT so
1587 	 * ->timeouts[0] contains 'new' timeout, like udp or icmp.
1588 	 */
1589 	tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
1590 
1591 	/* If it is set to zero, we disable picking up already established
1592 	 * connections.
1593 	 */
1594 	tn->tcp_loose = 1;
1595 
1596 	/* "Be conservative in what you do,
1597 	 *  be liberal in what you accept from others."
1598 	 * If it's non-zero, we mark only out-of-window RST segments as INVALID.
1599 	 */
1600 	tn->tcp_be_liberal = 0;
1601 
1602 	/* If it's non-zero, we turn off RST sequence number check */
1603 	tn->tcp_ignore_invalid_rst = 0;
1604 
1605 	/* Max number of the retransmitted packets without receiving an (acceptable)
1606 	 * ACK from the destination. If this number is reached, a shorter timer
1607 	 * will be started.
1608 	 */
1609 	tn->tcp_max_retrans = 3;
1610 
1611 #if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
1612 	tn->offload_timeout = 30 * HZ;
1613 #endif
1614 }
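/* For reference (registered elsewhere, in nf_conntrack_standalone.c, not
 * in this file): the tunables initialized above surface as
 * net.netfilter sysctls, e.g.:
 *
 *	sysctl -w net.netfilter.nf_conntrack_tcp_be_liberal=1
 *	sysctl -w net.netfilter.nf_conntrack_tcp_loose=0
 */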
1615 
1616 const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
1617 {
1618 	.l4proto 		= IPPROTO_TCP,
1619 #ifdef CONFIG_NF_CONNTRACK_PROCFS
1620 	.print_conntrack 	= tcp_print_conntrack,
1621 #endif
1622 	.can_early_drop		= tcp_can_early_drop,
1623 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1624 	.to_nlattr		= tcp_to_nlattr,
1625 	.from_nlattr		= nlattr_to_tcp,
1626 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
1627 	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
1628 	.nlattr_tuple_size	= tcp_nlattr_tuple_size,
1629 	.nlattr_size		= TCP_NLATTR_SIZE,
1630 	.nla_policy		= nf_ct_port_nla_policy,
1631 #endif
1632 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1633 	.ctnl_timeout		= {
1634 		.nlattr_to_obj	= tcp_timeout_nlattr_to_obj,
1635 		.obj_to_nlattr	= tcp_timeout_obj_to_nlattr,
1636 		.nlattr_max	= CTA_TIMEOUT_TCP_MAX,
1637 		.obj_size	= sizeof(unsigned int) *
1638 					TCP_CONNTRACK_TIMEOUT_MAX,
1639 		.nla_policy	= tcp_timeout_nla_policy,
1640 	},
1641 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
1642 };
1643