/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * One chain is dedicated to TIME_WAIT sockets.
 * I'll experiment with dynamic table growth later.
 */
struct inet_ehash_bucket {
	struct hlist_nulls_head chain;
	struct hlist_nulls_head twchain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 * (A condensed sketch of this fast-path check follows below.)
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-) -DaveM
 */
struct inet_bind_bucket {
#ifdef CONFIG_NET_NS
	struct net		*ib_net;
#endif
	unsigned short		port;
	signed short		fastreuse;
	int			num_owners;
	struct hlist_node	node;
	struct hlist_head	owners;
};

static inline struct net *ib_net(struct inet_bind_bucket *ib)
{
	return read_pnet(&ib->ib_net);
}

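/*
 * Illustrative sketch, not part of this header's API: the fast path that
 * the comment above describes.  The helper name example_bind_fast_path is
 * hypothetical; the real check lives in inet_csk_get_port().  While
 * tb->fastreuse is still positive, a reusing, non-listening socket can be
 * granted the port without walking tb->owners at all.
 */
static inline int example_bind_fast_path(const struct inet_bind_bucket *tb,
					 const struct sock *sk)
{
	return tb->fastreuse > 0 &&
	       sk->sk_reuse && sk->sk_state != TCP_LISTEN;
}
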
#define inet_bind_bucket_for_each(tb, node, head) \
	hlist_for_each_entry(tb, node, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/*
 * Sockets can be hashed in the established or the listening table.
 * We must use a different 'nulls' end-of-chain value for the listening
 * hash table, or we might find a socket that was closed and
 * reallocated/inserted into the established hash table.
 */
#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
	spinlock_t		lock;
	struct hlist_nulls_head	head;
};

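/*
 * Sketch (condensed from inet_hashinfo_init() in net/ipv4/inet_hashtables.c;
 * the helper name here is illustrative): every listening chain is terminated
 * with a distinct 'nulls' marker at or above LISTENING_NULLS_BASE.  A
 * lockless lookup that races with a socket being recycled into the
 * established table reaches a foreign end marker and knows to restart.
 */
static inline void example_init_listen_bucket(struct inet_listen_hashbucket *ilb,
					      unsigned int slot)
{
	spin_lock_init(&ilb->lock);
	INIT_HLIST_NULLS_HEAD(&ilb->head, slot + LISTENING_NULLS_BASE);
}
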
/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 * TIME_WAIT sockets use a separate chain (twchain).
	 */
	struct inet_ehash_bucket	*ehash;
	spinlock_t			*ehash_locks;
	unsigned int			ehash_size;
	unsigned int			ehash_locks_mask;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct inet_bind_hashbucket	*bhash;

	unsigned int			bhash_size;
	int				bsockets;

	struct kmem_cache		*bind_bucket_cachep;

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * might be often dirty.
	 */
	/* All sockets in TCP_LISTEN state will be in here.  This is the only
	 * table where wildcard'd TCP sockets can exist.  Hash function here
	 * is just local port number.
	 */
	struct inet_listen_hashbucket	listening_hash[INET_LHTABLE_SIZE]
					____cacheline_aligned_in_smp;
};

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
}

static inline spinlock_t *inet_ehash_lockp(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}

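/*
 * Example (illustrative only; the helper name is hypothetical): the two
 * accessors above are used as a pair.  One lock in ehash_locks typically
 * guards many buckets in ehash, since ehash_locks_mask is usually much
 * smaller than ehash_size - 1.
 */
static inline int example_ehash_chain_empty(struct inet_hashinfo *hashinfo,
					    unsigned int hash)
{
	spinlock_t *lock = inet_ehash_lockp(hashinfo, hash);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
	int ret;

	spin_lock(lock);
	ret = hlist_nulls_empty(&head->chain);
	spin_unlock(lock);
	return ret;
}
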
static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
	unsigned int i, size = 256;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif
	/* Scale the lock array with the number of possible CPUs. */
	if (nr_pcpus >= 4)
		size = 512;
	if (nr_pcpus >= 8)
		size = 1024;
	if (nr_pcpus >= 16)
		size = 2048;
	if (nr_pcpus >= 32)
		size = 4096;
	/* spinlock_t is zero-sized on UP builds without spinlock debugging,
	 * in which case no allocation is needed at all.
	 */
	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t),
						GFP_KERNEL);
		if (!hashinfo->ehash_locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&hashinfo->ehash_locks[i]);
	}
	hashinfo->ehash_locks_mask = size - 1;
	return 0;
}

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
	if (hashinfo->ehash_locks) {
#ifdef CONFIG_NUMA
		unsigned int size = (hashinfo->ehash_locks_mask + 1) *
				    sizeof(spinlock_t);
		if (size > PAGE_SIZE)
			vfree(hashinfo->ehash_locks);
		else
#endif
		kfree(hashinfo->ehash_locks);
		hashinfo->ehash_locks = NULL;
	}
}

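/*
 * Usage sketch (hypothetical caller): a protocol sizes and fills ->ehash
 * itself, then lets the two helpers above manage the lock array:
 *
 *	if (inet_ehash_locks_alloc(&my_hashinfo))
 *		panic("out of memory");
 *	...
 *	inet_ehash_locks_free(&my_hashinfo);
 *
 * my_hashinfo is a placeholder name; see the real callers in tcp_init()
 * and dccp_init().
 */
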
extern struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep,
			struct net *net,
			struct inet_bind_hashbucket *head,
			const unsigned short snum);
extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
				     struct inet_bind_bucket *tb);

static inline int inet_bhashfn(struct net *net,
			       const __u16 lport, const int bhash_size)
{
	return (lport + net_hash_mix(net)) & (bhash_size - 1);
}

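/*
 * Example (hypothetical helper): mapping a local port to its bind-hash
 * chain.  bhash_size is a power of two, so the masking in inet_bhashfn()
 * is a cheap modulo.
 */
static inline struct inet_bind_hashbucket *
example_bhash_head(struct inet_hashinfo *hashinfo, struct net *net, __u16 port)
{
	return &hashinfo->bhash[inet_bhashfn(net, port, hashinfo->bhash_size)];
}
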
extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
			   const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(struct net *net, const unsigned short num)
{
	return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(sock_net(sk), inet_sk(sk)->num);
}

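/*
 * Example (hypothetical helper): the listening-table bucket a bound socket
 * hashes into, keyed only by local port (plus the namespace mix).
 */
static inline struct inet_listen_hashbucket *
example_listen_head(struct inet_hashinfo *hashinfo, const struct sock *sk)
{
	return &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
}
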
/* Caller must disable local BH processing. */
extern void __inet_inherit_port(struct sock *sk, struct sock *child);

extern void inet_put_port(struct sock *sk);

void inet_hashinfo_init(struct inet_hashinfo *h);

extern void __inet_hash_nolisten(struct sock *sk);
extern void inet_hash(struct sock *sk);
extern void inet_unhash(struct sock *sk);

extern struct sock *__inet_lookup_listener(struct net *net,
					   struct inet_hashinfo *hashinfo,
					   const __be32 daddr,
					   const unsigned short hnum,
					   const int dif);

static inline struct sock *inet_lookup_listener(struct net *net,
						struct inet_hashinfo *hashinfo,
						__be32 daddr, __be16 dport, int dif)
{
	return __inet_lookup_listener(net, hashinfo, daddr, ntohs(dport), dif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
typedef __u32 __bitwise __portpair;
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif

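/*
 * Example (hypothetical helper): building the 32-bit port-pair search key.
 * Note the asymmetry, which mirrors the dport/num field pair in
 * struct inet_sock: the remote port stays in network byte order while the
 * local port is in host byte order.
 */
static inline __portpair example_ports_key(__be16 remote_port, __u16 local_port)
{
	return INET_COMBINED_PORTS(remote_port, local_port);
}
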
#if (BITS_PER_LONG == 64)
typedef __u64 __bitwise __addrpair;
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)));
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
	(((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) &&	\
	 ((*((__addrpair *)&(inet_sk(__sk)->daddr))) == (__cookie)) &&		\
	 ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) &&		\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
	(((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) &&	\
	 ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) &&	\
	 ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
	(((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) &&	\
	 (inet_sk(__sk)->daddr == (__saddr)) &&					\
	 (inet_sk(__sk)->rcv_saddr == (__daddr)) &&				\
	 ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports)) &&		\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif) \
	(((__sk)->sk_hash == (__hash)) && net_eq(sock_net(__sk), (__net)) &&	\
	 (inet_twsk(__sk)->tw_daddr == (__saddr)) &&				\
	 (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) &&			\
	 ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&	\
	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */

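/*
 * Sketch (hypothetical, heavily condensed from __inet_lookup_established()
 * in net/ipv4/inet_hashtables.c) of how the cookie and match macros above
 * combine in a lockless scan of one established chain.  Refcounting, the
 * nulls-marker restart and the twchain pass are all omitted here.
 */
static inline struct sock *example_scan_chain(struct net *net,
					      struct inet_ehash_bucket *head,
					      unsigned int hash,
					      const __be32 saddr, const __be32 daddr,
					      const __portpair ports, const int dif)
{
	struct sock *sk;
	const struct hlist_nulls_node *node;
	INET_ADDR_COOKIE(acookie, saddr, daddr)

	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		if (INET_MATCH(sk, net, hash, acookie,
			       saddr, daddr, ports, dif))
			return sk;	/* caller must still take a reference */
	}
	return NULL;
}
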
/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
extern struct sock * __inet_lookup_established(struct net *net,
		struct inet_hashinfo *hashinfo,
		const __be32 saddr, const __be16 sport,
		const __be32 daddr, const u16 hnum, const int dif);

static inline struct sock *
	inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
				const __be32 saddr, const __be16 sport,
				const __be32 daddr, const __be16 dport,
				const int dif)
{
	return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif);
}

static inline struct sock *__inet_lookup(struct net *net,
					 struct inet_hashinfo *hashinfo,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif)
{
	u16 hnum = ntohs(dport);
	struct sock *sk = __inet_lookup_established(net, hashinfo,
				saddr, sport, daddr, hnum, dif);

	return sk ? : __inet_lookup_listener(net, hashinfo, daddr, hnum, dif);
}

static inline struct sock *inet_lookup(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;

	local_bh_disable();
	sk = __inet_lookup(net, hashinfo, saddr, sport, daddr, dport, dif);
	local_bh_enable();

	return sk;
}

static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
					     struct sk_buff *skb,
					     const __be16 sport,
					     const __be16 dport)
{
	struct sock *sk;
	const struct iphdr *iph = ip_hdr(skb);

	if (unlikely(sk = skb_steal_sock(skb)))
		return sk;
	else
		return __inet_lookup(dev_net(skb->dst->dev), hashinfo,
				     iph->saddr, sport,
				     iph->daddr, dport, inet_iif(skb));
}

extern int __inet_hash_connect(struct inet_timewait_death_row *death_row,
			       struct sock *sk, u32 port_offset,
			       int (*check_established)(struct inet_timewait_death_row *,
					struct sock *, __u16, struct inet_timewait_sock **),
			       void (*hash)(struct sock *sk));
extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
			     struct sock *sk);
#endif /* _INET_HASHTABLES_H */