[NETNS]: Add netns refcnt debug for inet bind buckets.
[deliverable/linux.git] net/ipv4/inet_hashtables.c
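This change pairs the bind bucket's pointer to its network namespace with the netns refcount-debugging helpers: inet_bind_bucket_create() now stores hold_net(net) in tb->ib_net, and inet_bind_bucket_destroy() calls release_net() before freeing the bucket, so a namespace torn down while a bucket still points at it shows up as a leaked use count. For reference, a rough sketch of what those helpers look like in include/net/net_namespace.h of this era (the NETNS_REFCNT_DEBUG guard and the use_count field are quoted from memory and may differ in detail):

	#ifdef NETNS_REFCNT_DEBUG
	static inline struct net *hold_net(struct net *net)
	{
		if (net)
			atomic_inc(&net->use_count);	/* debug-only usage counter */
		return net;
	}

	static inline void release_net(struct net *net)
	{
		if (net)
			atomic_dec(&net->use_count);
	}
	#else
	static inline struct net *hold_net(struct net *net)
	{
		return net;
	}

	static inline void release_net(struct net *net)
	{
	}
	#endif
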
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/ip.h>

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb != NULL) {
		tb->ib_net = hold_net(net);
		tb->port = snum;
		tb->fastreuse = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		release_net(tb->ib_net);
		kmem_cache_free(cachep, tb);
	}
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->num = snum;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}

EXPORT_SYMBOL(inet_put_port);

/*
 * This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
 * Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines (wake up each
 * exclusive lock release). It should be ifdefed really.
 */
void inet_listen_wlock(struct inet_hashinfo *hashinfo)
	__acquires(hashinfo->lhash_lock)
{
	write_lock(&hashinfo->lhash_lock);

	if (atomic_read(&hashinfo->lhash_users)) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait_exclusive(&hashinfo->lhash_wait,
						  &wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&hashinfo->lhash_users))
				break;
			write_unlock_bh(&hashinfo->lhash_lock);
			schedule();
			write_lock_bh(&hashinfo->lhash_lock);
		}

		finish_wait(&hashinfo->lhash_wait, &wait);
	}
}
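
/*
 * For reference: the read side of lhash_lock lives in the header
 * (include/net/inet_hashtables.h).  A rough sketch, quoted from memory
 * and not part of this file, of how readers pair with the writer above:
 *
 *	static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
 *	{
 *		read_lock(&hashinfo->lhash_lock);
 *		atomic_inc(&hashinfo->lhash_users);
 *		read_unlock(&hashinfo->lhash_lock);
 *	}
 *
 *	static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
 *	{
 *		if (atomic_dec_and_test(&hashinfo->lhash_users))
 *			wake_up(&hashinfo->lhash_wait);
 *	}
 *
 * inet_listen_wlock() above sleeps until lhash_users drops to zero, and
 * the last reader wakes it through lhash_wait.
 */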

/*
 * Don't inline this cruft. Here are some nice properties to exploit here. The
 * BSD API does not allow a listening sock to specify the remote port nor the
 * remote address for the connection. So always assume those are both
 * wildcarded during the search since they can never be otherwise.
 */
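/*
 * Scoring in the loop below: +1 if the socket is PF_INET (so a native
 * IPv4 listener beats an AF_INET6 one on the same port), +2 for a
 * matching bound address, +2 for a matching bound device.  Example: a
 * PF_INET listener bound to the packet's destination address and to the
 * arriving interface scores 1 + 2 + 2 = 5 and is returned on the spot;
 * a listener bound to the wrong address or device is skipped outright,
 * and among the rest the highest score wins.
 */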
static struct sock *inet_lookup_listener_slow(struct net *net,
					      const struct hlist_head *head,
					      const __be32 daddr,
					      const unsigned short hnum,
					      const int dif)
{
	struct sock *result = NULL, *sk;
	const struct hlist_node *node;
	int hiscore = -1;

	sk_for_each(sk, node, head) {
		const struct inet_sock *inet = inet_sk(sk);

		if (net_eq(sock_net(sk), net) && inet->num == hnum &&
				!ipv6_only_sock(sk)) {
			const __be32 rcv_saddr = inet->rcv_saddr;
			int score = sk->sk_family == PF_INET ? 1 : 0;

			if (rcv_saddr) {
				if (rcv_saddr != daddr)
					continue;
				score += 2;
			}
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
					continue;
				score += 2;
			}
			if (score == 5)
				return sk;
			if (score > hiscore) {
				hiscore = score;
				result = sk;
			}
		}
	}
	return result;
}

/* Optimize the common listener case. */
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	struct sock *sk = NULL;
	const struct hlist_head *head;

	read_lock(&hashinfo->lhash_lock);
	head = &hashinfo->listening_hash[inet_lhashfn(hnum)];
	if (!hlist_empty(head)) {
		const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));

		if (inet->num == hnum && !sk->sk_node.next &&
		    (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
		    (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
		    !sk->sk_bound_dev_if && net_eq(sock_net(sk), net))
			goto sherry_cache;
		sk = inet_lookup_listener_slow(net, head, daddr, hnum, dif);
	}
	if (sk) {
sherry_cache:
		sock_hold(sk);
	}
	read_unlock(&hashinfo->lhash_lock);
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

struct sock * __inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
	rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);

	prefetch(head->chain.first);
	read_lock(lock);
	sk_for_each(sk, node, &head->chain) {
		if (INET_MATCH(sk, net, hash, acookie,
			       saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}

	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_for_each(sk, node, &head->twchain) {
		if (INET_TW_MATCH(sk, net, hash, acookie,
				  saddr, daddr, ports, dif))
			goto hit;
	}
	sk = NULL;
out:
	read_unlock(lock);
	return sk;
hit:
	sock_hold(sk);
	goto out;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);

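/*
 * A socket returned by __inet_lookup_established() carries a reference
 * (note the sock_hold() at the "hit" label) which the caller must drop
 * with sock_put().  Rough usage sketch for an IPv4 receive path, with
 * made-up locals rather than a quote of any particular caller:
 *
 *	struct sock *sk = __inet_lookup_established(net, &tcp_hashinfo,
 *						    iph->saddr, th->source,
 *						    iph->daddr, ntohs(th->dest),
 *						    inet_iif(skb));
 *	if (sk) {
 *		... deliver the segment to sk ...
 *		sock_put(sk);	balances the lookup's sock_hold()
 *	}
 */
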
/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->rcv_saddr;
	__be32 saddr = inet->daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
	unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_node *node;
	struct inet_timewait_sock *tw;
	struct net *net = sock_net(sk);

	prefetch(head->chain.first);
	write_lock(lock);

	/* Check TIME-WAIT sockets first. */
	sk_for_each(sk2, node, &head->twchain) {
		tw = inet_twsk(sk2);

		if (INET_TW_MATCH(sk2, net, hash, acookie,
				  saddr, daddr, ports, dif)) {
			if (twsk_unique(sk, sk2, twp))
				goto unique;
			else
				goto not_unique;
		}
	}
	tw = NULL;

	/* And established part... */
	sk_for_each(sk2, node, &head->chain) {
		if (INET_MATCH(sk2, net, hash, acookie,
			       saddr, daddr, ports, dif))
			goto not_unique;
	}

unique:
	/* Must record num and sport now. Otherwise we will see
	 * in hash table socket with a funny identity. */
	inet->num = lport;
	inet->sport = htons(lport);
	sk->sk_hash = hash;
	BUG_TRAP(sk_unhashed(sk));
	__sk_add_node(sk, &head->chain);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);

	if (twp) {
		*twp = tw;
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule(tw, death_row);
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);

		inet_twsk_put(tw);
	}

	return 0;

not_unique:
	write_unlock(lock);
	return -EADDRNOTAVAIL;
}

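/*
 * Starting offset for the ephemeral port walk in __inet_hash_connect():
 * secure_ipv4_port_ephemeral() hashes the (local address, remote address,
 * remote port) triple with a secret key, so connections to different
 * destinations begin probing at different ports instead of all contending
 * for the same first free one.
 */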
static inline u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr,
					  inet->dport);
}

void __inet_hash_nolisten(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_head *list;
	rwlock_t *lock;
	struct inet_ehash_bucket *head;

	BUG_TRAP(sk_unhashed(sk));

	sk->sk_hash = inet_sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	write_lock(lock);
	__sk_add_node(sk, list);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);

static void __inet_hash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_head *list;
	rwlock_t *lock;

	if (sk->sk_state != TCP_LISTEN) {
		__inet_hash_nolisten(sk);
		return;
	}

	BUG_TRAP(sk_unhashed(sk));
	list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
	lock = &hashinfo->lhash_lock;

	inet_listen_wlock(hashinfo);
	__sk_add_node(sk, list);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);
	wake_up(&hashinfo->lhash_wait);
}

void inet_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(sk);
		local_bh_enable();
	}
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
	rwlock_t *lock;
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;

	if (sk_unhashed(sk))
		goto out;

	if (sk->sk_state == TCP_LISTEN) {
		local_bh_disable();
		inet_listen_wlock(hashinfo);
		lock = &hashinfo->lhash_lock;
	} else {
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
		write_lock_bh(lock);
	}

	if (__sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(lock);
out:
	if (sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}
EXPORT_SYMBOL_GPL(inet_unhash);

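/*
 * Bind sk to a local port and hash it into the established table.  If
 * snum is 0 an ephemeral port is chosen: the loop probes every port in
 * the local range exactly once, starting at a per-destination offset,
 * the candidate on iteration i being low + (i + offset) % remaining.
 * Worked example (assuming the common default range 32768..61000):
 * remaining = 61000 - 32768 + 1 = 28233, so with offset 40000 the first
 * probe is 32768 + (1 + 40000) % 28233 = 32768 + 11768 = 44536, then
 * 44537, and so on, wrapping around the range.  A port whose bind bucket
 * already has owners in this namespace is reused only if the
 * check_established callback confirms the resulting 4-tuple would still
 * be unique (possibly by recycling a TIME-WAIT socket).  The
 * check_established and hash callbacks are what let other address
 * families reuse this function with their own lookup and hashing.
 */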
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **),
		void (*hash)(struct sock *sk))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);

	if (!snum) {
		int i, remaining, low, high, port;
		static u32 hint;
		u32 offset = hint + port_offset;
		struct hlist_node *node;
		struct inet_timewait_sock *tw = NULL;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;

		local_bh_disable();
		for (i = 1; i <= remaining; i++) {
			port = low + (i + offset) % remaining;
			head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, node, &head->chain) {
				if (tb->ib_net == net && tb->port == port) {
					BUG_TRAP(!hlist_empty(&tb->owners));
					if (tb->fastreuse >= 0)
						goto next_port;
					if (!check_established(death_row, sk,
							       port, &tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					net, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		hint += i;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->sport = htons(port);
			hash(sk);
		}
		spin_unlock(&head->lock);

		if (tw) {
			inet_twsk_deschedule(tw, death_row);
			inet_twsk_put(tw);
		}

		ret = 0;
		goto out;
	}

	head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)];
	tb = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		hash(sk);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
			__inet_check_established, __inet_hash_nolisten);
}

EXPORT_SYMBOL_GPL(inet_hash_connect);
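
/*
 * Typical caller is the active-open path.  A rough sketch of how
 * tcp_v4_connect() of this era uses it (quoted from memory, not part of
 * this file):
 *
 *	inet->dport = usin->sin_port;
 *	inet->daddr = daddr;
 *	...
 *	err = inet_hash_connect(&tcp_death_row, sk);
 *	if (err)
 *		goto failure;
 *	...	then the route is rechecked and the SYN is built and sent
 *
 * i.e. the source port is chosen and the socket hashed before the first
 * segment goes out, with __inet_check_established() recycling a matching
 * TIME-WAIT entry when that is safe.
 */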