/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <linux/refcount.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * The 'e' prefix stands for Established, but we really put all sockets
 * except LISTEN ones here.
 */
struct inet_ehash_bucket {
	struct hlist_nulls_head chain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all;
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-) -DaveM
 */
#define FASTREUSEPORT_ANY	1
#define FASTREUSEPORT_STRICT	2

struct inet_bind_bucket {
	possible_net_t		ib_net;
	int			l3mdev;
	unsigned short		port;
	signed char		fastreuse;
	signed char		fastreuseport;
	kuid_t			fastuid;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		fast_v6_rcv_saddr;
#endif
	__be32			fast_rcv_saddr;
	unsigned short		fast_sk_family;
	bool			fast_ipv6_only;
	struct hlist_node	node;
	struct hlist_head	owners;
};
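
/* Illustrative sketch (not part of this header, hence #if 0): one way the
 * fast-path flag described above can be maintained as sockets join a bind
 * bucket.  The real, fuller logic (including fastreuseport) lives in
 * inet_csk_update_fastreuse(); the helper name below is hypothetical and
 * simplified.
 */
#if 0
static void example_update_fastreuse(struct inet_bind_bucket *tb,
				     struct sock *newsk)
{
	/* A socket keeps the fast path alive only if it allows reuse
	 * and is not listening.
	 */
	bool reuse_ok = newsk->sk_reuse && newsk->sk_state != TCP_LISTEN;

	if (hlist_empty(&tb->owners))
		tb->fastreuse = reuse_ok;	/* first owner decides */
	else if (tb->fastreuse && !reuse_ok)
		tb->fastreuse = 0;		/* one failing socket clears it */
}
#endif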

struct inet_bind2_bucket {
	possible_net_t		ib_net;
	int			l3mdev;
	unsigned short		port;
#if IS_ENABLED(CONFIG_IPV6)
	unsigned short		family;
#endif
	union {
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr		v6_rcv_saddr;
#endif
		__be32			rcv_saddr;
	};
	/* Node in the bhash2 inet_bind_hashbucket chain */
	struct hlist_node	node;
	/* List of sockets hashed to this bucket */
	struct hlist_head	owners;
};

static inline struct net *ib_net(const struct inet_bind_bucket *ib)
{
	return read_pnet(&ib->ib_net);
}

static inline struct net *ib2_net(const struct inet_bind2_bucket *ib)
{
	return read_pnet(&ib->ib_net);
}

#define inet_bind_bucket_for_each(tb, head) \
	hlist_for_each_entry(tb, head, node)
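
/* Usage sketch (hypothetical helper, not compiled): finding the bucket for
 * a port in a bind hash chain under the bucket lock.  The match condition
 * mirrors inet_bind_bucket_match() declared below; real callers keep the
 * lock held while they use the bucket.
 */
#if 0
static struct inet_bind_bucket *
example_find_bind_bucket(struct inet_bind_hashbucket *head,
			 const struct net *net,
			 unsigned short port, int l3mdev)
{
	struct inet_bind_bucket *tb;

	spin_lock_bh(&head->lock);
	inet_bind_bucket_for_each(tb, &head->chain) {
		if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
		    tb->port == port)
			break;	/* tb points at the match; NULL if none */
	}
	spin_unlock_bh(&head->lock);
	return tb;
}
#endif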

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* Sockets can be hashed in established or listening table.
 * We must use different 'nulls' end-of-chain values for the hash buckets:
 * a socket might transition from ESTABLISHED to LISTEN state without
 * an RCU grace period.  A lookup in the ehash table needs to handle this case.
 */
#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
	spinlock_t		lock;
	struct hlist_nulls_head	nulls_head;
};
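
/* Sketch (simplified from __inet_lookup_established(), not compiled): an
 * RCU lookup must verify it ended on the chain it started on.  If the
 * nulls value at the end of the walk is not this bucket's slot, the chain
 * was spliced under us and the walk restarts.  example_keys_match() is a
 * hypothetical predicate standing in for the real key comparison.
 */
#if 0
static struct sock *example_ehash_walk(struct inet_ehash_bucket *head,
				       unsigned int slot)
{
	const struct hlist_nulls_node *node;
	struct sock *sk;

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (example_keys_match(sk))	/* hypothetical */
			return sk;
	}
	/* Ended on a foreign nulls value: restart on the intended chain. */
	if (get_nulls_value(node) != slot)
		goto begin;
	return NULL;
}
#endif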

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 */
	struct inet_ehash_bucket	*ehash;
	spinlock_t			*ehash_locks;
	unsigned int			ehash_mask;
	unsigned int			ehash_locks_mask;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct kmem_cache		*bind_bucket_cachep;
	/* This bind table is hashed by local port */
	struct inet_bind_hashbucket	*bhash;
	struct kmem_cache		*bind2_bucket_cachep;
	/* This bind table is hashed by local port and sk->sk_rcv_saddr (ipv4)
	 * or sk->sk_v6_rcv_saddr (ipv6).  This 2nd bind table is used
	 * primarily for expediting bind conflict resolution.
	 */
	struct inet_bind_hashbucket	*bhash2;
	unsigned int			bhash_size;

	/* The 2nd listener table hashed by local port and address */
	unsigned int			lhash2_mask;
	struct inet_listen_hashbucket	*lhash2;

	bool				pernet;
};

static inline struct inet_hashinfo *tcp_or_dccp_get_hashinfo(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IP_DCCP)
	return sk->sk_prot->h.hashinfo ? :
		sock_net(sk)->ipv4.tcp_death_row.hashinfo;
#else
	return sock_net(sk)->ipv4.tcp_death_row.hashinfo;
#endif
}

static inline struct inet_listen_hashbucket *
inet_lhash2_bucket(struct inet_hashinfo *h, u32 hash)
{
	return &h->lhash2[hash & h->lhash2_mask];
}

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}

static inline spinlock_t *inet_ehash_lockp(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
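
/* Sketch (pattern as in inet_ehash_insert(), not compiled): the bucket and
 * its lock are derived from the same hash through different masks, so
 * several buckets may share one lock; a writer takes the lock for that
 * hash before touching the chain.  The function name is hypothetical.
 */
#if 0
static void example_ehash_add(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	__sk_nulls_add_node_rcu(sk, &head->chain);
	spin_unlock(lock);
}
#endif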

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);

static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h)
{
	kfree(h->lhash2);
	h->lhash2 = NULL;
}

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
	kvfree(hashinfo->ehash_locks);
	hashinfo->ehash_locks = NULL;
}

struct inet_hashinfo *inet_pernet_hashinfo_alloc(struct inet_hashinfo *hashinfo,
						 unsigned int ehash_entries);
void inet_pernet_hashinfo_free(struct inet_hashinfo *hashinfo);

struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
			struct inet_bind_hashbucket *head,
			const unsigned short snum, int l3mdev);
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
			      struct inet_bind_bucket *tb);

bool inet_bind_bucket_match(const struct inet_bind_bucket *tb,
			    const struct net *net, unsigned short port,
			    int l3mdev);

struct inet_bind2_bucket *
inet_bind2_bucket_create(struct kmem_cache *cachep, struct net *net,
			 struct inet_bind_hashbucket *head,
			 unsigned short port, int l3mdev,
			 const struct sock *sk);

void inet_bind2_bucket_destroy(struct kmem_cache *cachep,
			       struct inet_bind2_bucket *tb);

struct inet_bind2_bucket *
inet_bind2_bucket_find(const struct inet_bind_hashbucket *head,
		       const struct net *net,
		       unsigned short port, int l3mdev,
		       const struct sock *sk);

bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb,
				      const struct net *net, unsigned short port,
				      int l3mdev, const struct sock *sk);

static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
			       const u32 bhash_size)
{
	return (lport + net_hash_mix(net)) & (bhash_size - 1);
}

static inline struct inet_bind_hashbucket *
inet_bhashfn_portaddr(const struct inet_hashinfo *hinfo, const struct sock *sk,
		      const struct net *net, unsigned short port)
{
	u32 hash;

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		hash = ipv6_portaddr_hash(net, &sk->sk_v6_rcv_saddr, port);
	else
#endif
		hash = ipv4_portaddr_hash(net, sk->sk_rcv_saddr, port);
	return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}

struct inet_bind_hashbucket *
inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port);

/* This should be called whenever a socket's sk_rcv_saddr (ipv4) or
 * sk_v6_rcv_saddr (ipv6) changes after it has been bound.  The socket's
 * rcv_saddr field should already have been updated when this is called.
 */
int inet_bhash2_update_saddr(struct sock *sk, void *saddr, int family);
void inet_bhash2_reset_saddr(struct sock *sk);

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    struct inet_bind2_bucket *tb2, unsigned short port);

/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);

void inet_put_port(struct sock *sk);

void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
			 unsigned long numentries, int scale,
			 unsigned long low_limit,
			 unsigned long high_limit);
int inet_hashinfo2_init_mod(struct inet_hashinfo *h);

bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
			 bool *found_dup_sk);
int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);

struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, const __be16 sport,
				    const __be32 daddr,
				    const unsigned short hnum,
				    const int dif, const int sdif);

static inline struct sock *inet_lookup_listener(struct net *net,
						struct inet_hashinfo *hashinfo,
						struct sk_buff *skb, int doff,
						__be32 saddr, __be16 sport,
						__be32 daddr, __be16 dport, int dif, int sdif)
{
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
				      daddr, ntohs(dport), dif, sdif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif

#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */

static inline bool inet_match(struct net *net, const struct sock *sk,
			      const __addrpair cookie, const __portpair ports,
			      int dif, int sdif)
{
	if (!net_eq(sock_net(sk), net) ||
	    sk->sk_portpair != ports ||
	    sk->sk_addrpair != cookie)
		return false;

	/* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */
	return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif,
				    sdif);
}
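
/* Sketch (simplified from __inet_lookup_established() in
 * net/ipv4/inet_hashtables.c, not compiled): a lookup builds the 64-bit
 * address cookie and 32-bit port pair once, then compares whole words per
 * socket via inet_match().  The function name is hypothetical and the
 * nulls restart is omitted for brevity.
 */
#if 0
static struct sock *example_ehash_lookup(struct net *net,
					 struct inet_ehash_bucket *head,
					 unsigned int hash,
					 __be32 saddr, __be16 sport,
					 __be32 daddr, u16 hnum,
					 int dif, int sdif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	const struct hlist_nulls_node *node;
	struct sock *sk;

	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (sk->sk_hash != hash)
			continue;
		if (likely(inet_match(net, sk, acookie, ports, dif, sdif)))
			return sk;
	}
	return NULL;
}
#endif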

/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 */
struct sock *__inet_lookup_established(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif, const int sdif);

static inline struct sock *
inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
			const __be32 saddr, const __be16 sport,
			const __be32 daddr, const __be16 dport,
			const int dif)
{
	return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif, 0);
}

static inline struct sock *__inet_lookup(struct net *net,
					 struct inet_hashinfo *hashinfo,
					 struct sk_buff *skb, int doff,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif, const int sdif,
					 bool *refcounted)
{
	u16 hnum = ntohs(dport);
	struct sock *sk;

	sk = __inet_lookup_established(net, hashinfo, saddr, sport,
				       daddr, hnum, dif, sdif);
	*refcounted = true;
	if (sk)
		return sk;
	*refcounted = false;
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
				      sport, daddr, hnum, dif, sdif);
}

static inline struct sock *inet_lookup(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       struct sk_buff *skb, int doff,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;
	bool refcounted;

	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
			   dport, dif, 0, &refcounted);

	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}

static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
					     struct sk_buff *skb,
					     int doff,
					     const __be16 sport,
					     const __be16 dport,
					     const int sdif,
					     bool *refcounted)
{
	struct sock *sk = skb_steal_sock(skb, refcounted);
	const struct iphdr *iph = ip_hdr(skb);

	if (sk)
		return sk;

	return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
			     doff, iph->saddr, sport,
			     iph->daddr, dport, inet_iif(skb), sdif,
			     refcounted);
}

u32 inet6_ehashfn(const struct net *net,
		  const struct in6_addr *laddr, const u16 lport,
		  const struct in6_addr *faddr, const __be16 fport);

static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_daddr = addr; /* alias of inet_daddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
#endif
}

static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
#endif
}

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
			struct sock *sk, u64 port_offset,
			int (*check_established)(struct inet_timewait_death_row *,
						 struct sock *, __u16,
						 struct inet_timewait_sock **));

int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk);
#endif /* _INET_HASHTABLES_H */