/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H

#include <linux/config.h>

#include <linux/interrupt.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/sock.h>
#include <net/tcp_states.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * New scheme, half the table is for TIME_WAIT, the other half is
 * for the rest.  I'll experiment with dynamic table growth later.
 */
struct inet_ehash_bucket {
	rwlock_t	  lock;
	struct hlist_head chain;
} __attribute__((__aligned__(8)));
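/*
 * Example (illustrative sketch, not part of the original header): because
 * the established table is sized as 2 * ehash_size buckets, the TIME_WAIT
 * counterpart of a given chain sits exactly ehash_size slots further on,
 * and both chains are covered by the lock of the lower bucket:
 *
 *	struct inet_ehash_bucket *est = &hashinfo->ehash[hash];
 *	struct inet_ehash_bucket *tw  = est + hashinfo->ehash_size;
 *
 * __inet_lookup_established() below walks the pair in exactly this way.
 */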
/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (ie. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
struct inet_bind_bucket {
	unsigned short		port;
	signed short		fastreuse;
	struct hlist_node	node;
	struct hlist_head	owners;
};
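/*
 * Illustrative sketch only (the real update lives in the bind/get-port
 * code, not in this header): the rule described above means tb->fastreuse
 * stays set only while every owner passes the reuse test, roughly:
 *
 *	if (hlist_empty(&tb->owners))
 *		tb->fastreuse = (sk->sk_reuse &&
 *				 sk->sk_state != TCP_LISTEN);
 *	else if (tb->fastreuse &&
 *		 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
 *		tb->fastreuse = 0;
 *
 * A later bind with sk->sk_reuse set can then succeed without walking
 * tb->owners at all.
 */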
#define inet_bind_bucket_for_each(tb, node, head) \
	hlist_for_each_entry(tb, node, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 * First half of the table is for sockets not in TIME_WAIT, second half
	 * is for TIME_WAIT sockets only.
	 */
	struct inet_ehash_bucket	*ehash;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct inet_bind_hashbucket	*bhash;

	int				bhash_size;
	int				ehash_size;

	/* All sockets in TCP_LISTEN state will be in here.  This is the only
	 * table where wildcard'd TCP sockets can exist.  Hash function here
	 * is just local port number.
	 */
	struct hlist_head		listening_hash[INET_LHTABLE_SIZE];

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * are often dirty.
	 */
	rwlock_t			lhash_lock ____cacheline_aligned;
	atomic_t			lhash_users;
	wait_queue_head_t		lhash_wait;
	spinlock_t			portalloc_lock;
	kmem_cache_t			*bind_bucket_cachep;
	int				port_rover;
};

static inline int inet_ehashfn(const __u32 laddr, const __u16 lport,
			       const __u32 faddr, const __u16 fport,
			       const int ehash_size)
{
	int h = (laddr ^ lport) ^ (faddr ^ fport);
	h ^= h >> 16;
	h ^= h >> 8;
	return h & (ehash_size - 1);
}
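/*
 * Example (illustrative only): ehash_size must be a power of two, because
 * the hash above is reduced with "& (ehash_size - 1)" rather than a
 * modulus.  Typical use is picking the chain for a connection:
 *
 *	const int slot = inet_ehashfn(laddr, lport, faddr, fport,
 *				      hashinfo->ehash_size);
 *	struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
 */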
static inline int inet_sk_ehashfn(const struct sock *sk, const int ehash_size)
{
	const struct inet_sock *inet = inet_sk(sk);
	const __u32 laddr = inet->rcv_saddr;
	const __u16 lport = inet->num;
	const __u32 faddr = inet->daddr;
	const __u16 fport = inet->dport;

	return inet_ehashfn(laddr, lport, faddr, fport, ehash_size);
}

extern struct inet_bind_bucket *
inet_bind_bucket_create(kmem_cache_t *cachep,
			struct inet_bind_hashbucket *head,
			const unsigned short snum);
extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
				     struct inet_bind_bucket *tb);

static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
{
	return lport & (bhash_size - 1);
}

extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
			   const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(const unsigned short num)
{
	return num & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(inet_sk(sk)->num);
}

/* Caller must disable local BH processing. */
static inline void __inet_inherit_port(struct inet_hashinfo *table,
				       struct sock *sk, struct sock *child)
{
	const int bhash = inet_bhashfn(inet_sk(child)->num, table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	sk_add_bind_node(child, &tb->owners);
	inet_csk(child)->icsk_bind_hash = tb;
	spin_unlock(&head->lock);
}

static inline void inet_inherit_port(struct inet_hashinfo *table,
				     struct sock *sk, struct sock *child)
{
	local_bh_disable();
	__inet_inherit_port(table, sk, child);
	local_bh_enable();
}

extern void inet_put_port(struct inet_hashinfo *table, struct sock *sk);

extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);

/*
 * - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&inet_hashinfo.lhash_lock).
 */
static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
{
	/* read_lock synchronizes with candidate writers */
	read_lock(&hashinfo->lhash_lock);
	atomic_inc(&hashinfo->lhash_users);
	read_unlock(&hashinfo->lhash_lock);
}

static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
{
	if (atomic_dec_and_test(&hashinfo->lhash_users))
		wake_up(&hashinfo->lhash_wait);
}
static inline void __inet_hash(struct inet_hashinfo *hashinfo,
			       struct sock *sk, const int listen_possible)
{
	struct hlist_head *list;
	rwlock_t *lock;

	BUG_TRAP(sk_unhashed(sk));
	if (listen_possible && sk->sk_state == TCP_LISTEN) {
		list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
		lock = &hashinfo->lhash_lock;
		inet_listen_wlock(hashinfo);
	} else {
		sk->sk_hashent = inet_sk_ehashfn(sk, hashinfo->ehash_size);
		list = &hashinfo->ehash[sk->sk_hashent].chain;
		lock = &hashinfo->ehash[sk->sk_hashent].lock;
		write_lock(lock);
	}
	__sk_add_node(sk, list);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock(lock);
	if (listen_possible && sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}
static inline void inet_hash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(hashinfo, sk, 1);
		local_bh_enable();
	}
}

static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	rwlock_t *lock;

	if (sk_unhashed(sk))
		goto out;

	if (sk->sk_state == TCP_LISTEN) {
		local_bh_disable();
		inet_listen_wlock(hashinfo);
		lock = &hashinfo->lhash_lock;
	} else {
		struct inet_ehash_bucket *head = &hashinfo->ehash[sk->sk_hashent];
		lock = &head->lock;
		write_lock_bh(&head->lock);
	}

	if (__sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);
	write_unlock_bh(lock);
out:
	if (sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}
extern struct sock *__inet_lookup_listener(const struct hlist_head *head,
					   const u32 daddr,
					   const unsigned short hnum,
					   const int dif);

/* Optimize the common listener case. */
static inline struct sock *
		inet_lookup_listener(struct inet_hashinfo *hashinfo,
				     const u32 daddr,
				     const unsigned short hnum, const int dif)
{
	struct sock *sk = NULL;
	const struct hlist_head *head;

	read_lock(&hashinfo->lhash_lock);
	head = &hashinfo->listening_hash[inet_lhashfn(hnum)];
	if (!hlist_empty(head)) {
		const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));

		if (inet->num == hnum && !sk->sk_node.next &&
		    (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
		    (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
		    !sk->sk_bound_dev_if)
			goto sherry_cache;
		sk = __inet_lookup_listener(head, daddr, hnum, dif);
	}
	if (sk) {
sherry_cache:
		sock_hold(sk);
	}
	read_unlock(&hashinfo->lhash_lock);
	return sk;
}
/* Socket demux engine toys. */
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__sport) << 16) | (__u32)(__dport))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__dport) << 16) | (__u32)(__sport))
#endif

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __u64 __name = (((__u64)(__saddr)) << 32) | ((__u64)(__daddr));
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __u64 __name = (((__u64)(__daddr)) << 32) | ((__u64)(__saddr));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)	\
	(((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie))	&&	\
	 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
	(((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) &&	\
	 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)	\
	((inet_sk(__sk)->daddr		== (__saddr))		&&	\
	 (inet_sk(__sk)->rcv_saddr	== (__daddr))		&&	\
	 ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports))	&&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)	\
	((inet_twsk(__sk)->tw_daddr	== (__saddr))		&&	\
	 (inet_twsk(__sk)->tw_rcv_saddr	== (__daddr))		&&	\
	 ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */
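/*
 * Note (illustrative, not in the original): the 64-bit INET_MATCH above
 * reads inet_sk(sk)->daddr together with the member that follows it as a
 * single __u64, and dport together with the adjacent local port as a
 * single __u32, so it relies on those fields being laid out back to back
 * in struct inet_sock (likewise tw_daddr/tw_rcv_saddr and tw_dport in the
 * timewait socket).  A hypothetical compile-time check of that assumption
 * could look like:
 *
 *	BUILD_BUG_ON(offsetof(struct inet_sock, rcv_saddr) !=
 *		     offsetof(struct inet_sock, daddr) + sizeof(__u32));
 */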
/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
static inline struct sock *
	__inet_lookup_established(struct inet_hashinfo *hashinfo,
				  const u32 saddr, const u16 sport,
				  const u32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __u32 ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	const int hash = inet_ehashfn(daddr, hnum, saddr, sport, hashinfo->ehash_size);
	struct inet_ehash_bucket *head = &hashinfo->ehash[hash];

	read_lock(&head->lock);
	sk_for_each(sk, node, &head->chain) {
		if (INET_MATCH(sk, acookie, saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}

	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
		if (INET_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
			goto hit;
	}
	sk = NULL;
out:
	read_unlock(&head->lock);
	return sk;
hit:
	sock_hold(sk);
	goto out;
}

static inline struct sock *__inet_lookup(struct inet_hashinfo *hashinfo,
					 const u32 saddr, const u16 sport,
					 const u32 daddr, const u16 hnum,
					 const int dif)
{
	struct sock *sk = __inet_lookup_established(hashinfo, saddr, sport, daddr,
						    hnum, dif);
	return sk ? : inet_lookup_listener(hashinfo, daddr, hnum, dif);
}

static inline struct sock *inet_lookup(struct inet_hashinfo *hashinfo,
				       const u32 saddr, const u16 sport,
				       const u32 daddr, const u16 dport,
				       const int dif)
{
	struct sock *sk;

	local_bh_disable();
	sk = __inet_lookup(hashinfo, saddr, sport, daddr, ntohs(dport), dif);
	local_bh_enable();

	return sk;
}
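/*
 * Example (illustrative sketch, not taken from this file): a protocol
 * receive handler would typically resolve the owning socket with the
 * wildcard-aware helper, passing the port fields in network byte order;
 * iph, th, my_hashinfo and dif are hypothetical names here:
 *
 *	struct sock *sk = inet_lookup(&my_hashinfo,
 *				      iph->saddr, th->source,
 *				      iph->daddr, th->dest,
 *				      dif);
 *	if (!sk)
 *		goto no_socket;
 *	... process the segment, then sock_put(sk) to drop the
 *	    reference taken by the lookup ...
 */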
#endif /* _INET_HASHTABLES_H */