tcp/dccp: fix potential NULL deref in __inet_inherit_port()
net/ipv4/inet_hashtables.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Generic INET transport hashtables
 *
 * Authors:     Lotsa people, from code originally in tcp
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>

static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
                        const __u16 lport, const __be32 faddr,
                        const __be16 fport)
{
        static u32 inet_ehash_secret __read_mostly;

        net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

        return __inet_ehashfn(laddr, lport, faddr, fport,
                              inet_ehash_secret + net_hash_mix(net));
}
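
/*
 * The hash secret is seeded lazily, on first use, via
 * net_get_random_once(), so the table works before the entropy pool is
 * fully initialized.  Mixing in net_hash_mix(net) gives each network
 * namespace its own hash distribution, so chain placement in one
 * namespace reveals nothing about placement in another.
 */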

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6 &&
            !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
                return inet6_ehashfn(sock_net(sk),
                                     &sk->sk_v6_rcv_saddr, sk->sk_num,
                                     &sk->sk_v6_daddr, sk->sk_dport);
#endif
        return inet_ehashfn(sock_net(sk),
                            sk->sk_rcv_saddr, sk->sk_num,
                            sk->sk_daddr, sk->sk_dport);
}
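
/*
 * Note: an AF_INET6 socket talking to a v4-mapped peer (::ffff:a.b.c.d)
 * deliberately falls through to the IPv4 hash above, so it lands on the
 * same chain as an AF_INET socket with the same 4-tuple and both lookup
 * paths agree on its location.
 */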

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
                                                 struct net *net,
                                                 struct inet_bind_hashbucket *head,
                                                 const unsigned short snum)
{
        struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

        if (tb) {
                write_pnet(&tb->ib_net, net);
                tb->port = snum;
                tb->fastreuse = 0;
                tb->fastreuseport = 0;
                tb->num_owners = 0;
                INIT_HLIST_HEAD(&tb->owners);
                hlist_add_head(&tb->node, &head->chain);
        }
        return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
        if (hlist_empty(&tb->owners)) {
                __hlist_del(&tb->node);
                kmem_cache_free(cachep, tb);
        }
}
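
/*
 * A bind bucket lives only as long as it has owners: inet_bind_hash()
 * below adds a socket to tb->owners, and once __inet_put_port() removes
 * the last owner, inet_bind_bucket_destroy() frees the bucket.  Both
 * transitions happen under the same bhash chain lock.
 */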

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
                    const unsigned short snum)
{
        inet_sk(sk)->inet_num = snum;
        sk_add_bind_node(sk, &tb->owners);
        tb->num_owners++;
        inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
                        hashinfo->bhash_size);
        struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
        struct inet_bind_bucket *tb;

        spin_lock(&head->lock);
        tb = inet_csk(sk)->icsk_bind_hash;
        __sk_del_bind_node(sk);
        tb->num_owners--;
        inet_csk(sk)->icsk_bind_hash = NULL;
        inet_sk(sk)->inet_num = 0;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
        spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
        local_bh_disable();
        __inet_put_port(sk);
        local_bh_enable();
}
EXPORT_SYMBOL(inet_put_port);
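
/*
 * Bind hash chain locks are always taken with BHs disabled (callers in
 * atomic context already run that way), so __inet_put_port() uses a
 * plain spin_lock(); inet_put_port() is the process-context wrapper
 * that supplies the local_bh_disable()/enable() pair.
 */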

int __inet_inherit_port(const struct sock *sk, struct sock *child)
{
        struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
        unsigned short port = inet_sk(child)->inet_num;
        const int bhash = inet_bhashfn(sock_net(sk), port,
                        table->bhash_size);
        struct inet_bind_hashbucket *head = &table->bhash[bhash];
        struct inet_bind_bucket *tb;

        spin_lock(&head->lock);
        tb = inet_csk(sk)->icsk_bind_hash;
        if (unlikely(!tb)) {
                spin_unlock(&head->lock);
                return -ENOENT;
        }
        if (tb->port != port) {
                /* NOTE: using tproxy and redirecting skbs to a proxy
                 * on a different listener port breaks the assumption
                 * that the listener socket's icsk_bind_hash is the same
                 * as that of the child socket. We have to look up or
                 * create a new bind bucket for the child here. */
                inet_bind_bucket_for_each(tb, &head->chain) {
                        if (net_eq(ib_net(tb), sock_net(sk)) &&
                            tb->port == port)
                                break;
                }
                if (!tb) {
                        tb = inet_bind_bucket_create(table->bind_bucket_cachep,
                                                     sock_net(sk), head, port);
                        if (!tb) {
                                spin_unlock(&head->lock);
                                return -ENOMEM;
                        }
                }
        }
        inet_bind_hash(child, tb, port);
        spin_unlock(&head->lock);

        return 0;
}
EXPORT_SYMBOL_GPL(__inet_inherit_port);
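
/*
 * The unlikely(!tb) check above is the NULL-deref fix named in the
 * commit title: the parent's icsk_bind_hash can already be NULL by the
 * time the child is attached here (for instance if the listener is
 * concurrently releasing its port), so an unconditional tb->port
 * dereference could oops.  Returning -ENOENT lets the caller drop the
 * child instead.
 */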

static inline int compute_score(struct sock *sk, struct net *net,
                                const unsigned short hnum, const __be32 daddr,
                                const int dif)
{
        int score = -1;
        struct inet_sock *inet = inet_sk(sk);

        if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
            !ipv6_only_sock(sk)) {
                __be32 rcv_saddr = inet->inet_rcv_saddr;
                score = sk->sk_family == PF_INET ? 2 : 1;
                if (rcv_saddr) {
                        if (rcv_saddr != daddr)
                                return -1;
                        score += 4;
                }
                if (sk->sk_bound_dev_if) {
                        if (sk->sk_bound_dev_if != dif)
                                return -1;
                        score += 4;
                }
                if (sk->sk_incoming_cpu == raw_smp_processor_id())
                        score++;
        }
        return score;
}
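
/*
 * Worked example: for an IPv4 SYN to 10.0.0.1:80 arriving on ifindex 2,
 * a listener on 0.0.0.0:80 scores 2; one bound to 10.0.0.1:80 scores
 * 2 + 4 = 6; one also bound to ifindex 2 via SO_BINDTODEVICE scores
 * 2 + 4 + 4 = 10, plus 1 more if its sk_incoming_cpu matches the
 * current cpu.  A socket whose bound address or device mismatches is
 * disqualified outright (-1).
 */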

/*
 * Don't inline this cruft. There are some nice properties to exploit here.
 * The BSD API does not allow a listening sock to specify the remote port
 * nor the remote address for the connection. So always assume those are
 * both wildcarded during the search since they can never be otherwise.
 */

struct sock *__inet_lookup_listener(struct net *net,
                                    struct inet_hashinfo *hashinfo,
                                    const __be32 saddr, __be16 sport,
                                    const __be32 daddr, const unsigned short hnum,
                                    const int dif)
{
        struct sock *sk, *result;
        struct hlist_nulls_node *node;
        unsigned int hash = inet_lhashfn(net, hnum);
        struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
        int score, hiscore, matches = 0, reuseport = 0;
        u32 phash = 0;

        rcu_read_lock();
begin:
        result = NULL;
        hiscore = 0;
        sk_nulls_for_each_rcu(sk, node, &ilb->head) {
                score = compute_score(sk, net, hnum, daddr, dif);
                if (score > hiscore) {
                        result = sk;
                        hiscore = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
                                phash = inet_ehashfn(net, daddr, hnum,
                                                     saddr, sport);
                                matches = 1;
                        }
                } else if (score == hiscore && reuseport) {
                        matches++;
                        if (reciprocal_scale(phash, matches) == 0)
                                result = sk;
                        phash = next_pseudo_random32(phash);
                }
        }
        /*
         * If the nulls value we got at the end of this lookup is not the
         * expected one, we must restart the lookup.  We probably met an
         * item that was moved to another chain.
         */
        if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
                goto begin;
        if (result) {
                if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
                        result = NULL;
                else if (unlikely(compute_score(result, net, hnum, daddr,
                                  dif) < hiscore)) {
                        sock_put(result);
                        goto begin;
                }
        }
        rcu_read_unlock();
        return result;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);
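
/*
 * The reuseport handling above is reservoir sampling: the n-th
 * equal-scoring member of a SO_REUSEPORT group replaces the current
 * pick with probability 1/n (reciprocal_scale(phash, n) == 0), so each
 * member is chosen with roughly equal probability.  Seeding phash from
 * the flow hash makes the choice deterministic for a given 4-tuple and
 * chain layout, which tends to keep one flow on one listener.
 */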

/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
        if (!atomic_dec_and_test(&sk->sk_refcnt))
                return;

        if (sk->sk_state == TCP_TIME_WAIT)
                inet_twsk_free(inet_twsk(sk));
        else if (sk->sk_state == TCP_NEW_SYN_RECV)
                reqsk_free(inet_reqsk(sk));
        else
                sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);

void sock_edemux(struct sk_buff *skb)
{
        sock_gen_put(skb->sk);
}
EXPORT_SYMBOL(sock_edemux);

struct sock *__inet_lookup_established(struct net *net,
                                       struct inet_hashinfo *hashinfo,
                                       const __be32 saddr, const __be16 sport,
                                       const __be32 daddr, const u16 hnum,
                                       const int dif)
{
        INET_ADDR_COOKIE(acookie, saddr, daddr);
        const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
        struct sock *sk;
        const struct hlist_nulls_node *node;
        /* Optimize here for direct hit, only listening connections can
         * have wildcards anyway.
         */
        unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport);
        unsigned int slot = hash & hashinfo->ehash_mask;
        struct inet_ehash_bucket *head = &hashinfo->ehash[slot];

        rcu_read_lock();
begin:
        sk_nulls_for_each_rcu(sk, node, &head->chain) {
                if (sk->sk_hash != hash)
                        continue;
                if (likely(INET_MATCH(sk, net, acookie,
                                      saddr, daddr, ports, dif))) {
                        if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
                                goto out;
                        if (unlikely(!INET_MATCH(sk, net, acookie,
                                                 saddr, daddr, ports, dif))) {
                                sock_gen_put(sk);
                                goto begin;
                        }
                        goto found;
                }
        }
        /*
         * If the nulls value we got at the end of this lookup is not the
         * expected one, we must restart the lookup.  We probably met an
         * item that was moved to another chain.
         */
        if (get_nulls_value(node) != slot)
                goto begin;
out:
        sk = NULL;
found:
        rcu_read_unlock();
        return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
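
/*
 * Lockless lookup pattern used above: match under RCU, take a
 * reference with atomic_inc_not_zero() (the socket may already be
 * dying with a zero refcount), then re-check INET_MATCH, since the
 * object can be freed and recycled for a different identity between
 * the match and the refcount grab.  The nulls check at the end catches
 * the remaining race, where the entry migrated to another chain while
 * we were walking this one.
 */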

/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
                                    struct sock *sk, __u16 lport,
                                    struct inet_timewait_sock **twp)
{
        struct inet_hashinfo *hinfo = death_row->hashinfo;
        struct inet_sock *inet = inet_sk(sk);
        __be32 daddr = inet->inet_rcv_saddr;
        __be32 saddr = inet->inet_daddr;
        int dif = sk->sk_bound_dev_if;
        INET_ADDR_COOKIE(acookie, saddr, daddr);
        const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
        struct net *net = sock_net(sk);
        unsigned int hash = inet_ehashfn(net, daddr, lport,
                                         saddr, inet->inet_dport);
        struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
        spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
        struct sock *sk2;
        const struct hlist_nulls_node *node;
        struct inet_timewait_sock *tw = NULL;

        spin_lock(lock);

        sk_nulls_for_each(sk2, node, &head->chain) {
                if (sk2->sk_hash != hash)
                        continue;

                if (likely(INET_MATCH(sk2, net, acookie,
                                      saddr, daddr, ports, dif))) {
                        if (sk2->sk_state == TCP_TIME_WAIT) {
                                tw = inet_twsk(sk2);
                                if (twsk_unique(sk, sk2, twp))
                                        break;
                        }
                        goto not_unique;
                }
        }
        /* Must record num and sport now. Otherwise we will see in the
         * hash table a socket with a funny identity.
         */
        inet->inet_num = lport;
        inet->inet_sport = htons(lport);
        sk->sk_hash = hash;
        WARN_ON(!sk_unhashed(sk));
        __sk_nulls_add_node_rcu(sk, &head->chain);
        if (tw) {
                sk_nulls_del_node_init_rcu((struct sock *)tw);
                NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
        }
        spin_unlock(lock);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

        if (twp) {
                *twp = tw;
        } else if (tw) {
                /* Silly. Should hash-dance instead... */
                inet_twsk_deschedule_put(tw);
        }
        return 0;

not_unique:
        spin_unlock(lock);
        return -EADDRNOTAVAIL;
}
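
/*
 * This is the uniqueness check behind ephemeral port selection: the
 * candidate (saddr, lport, daddr, dport) tuple is rejected with
 * -EADDRNOTAVAIL if a live socket already uses it, but a TIME_WAIT
 * occupant may still be recycled when twsk_unique() permits it (for
 * TCP, roughly when timestamps let the new connection pick safe
 * sequence numbers).
 */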

static u32 inet_sk_port_offset(const struct sock *sk)
{
        const struct inet_sock *inet = inet_sk(sk);

        return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
                                          inet->inet_daddr,
                                          inet->inet_dport);
}

/* Insert a socket into ehash, and eventually remove another one.
 * (The other one can be a SYN_RECV or TIMEWAIT socket.)
 */
int inet_ehash_insert(struct sock *sk, struct sock *osk)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct hlist_nulls_head *list;
        struct inet_ehash_bucket *head;
        spinlock_t *lock;
        int ret = 0;

        WARN_ON_ONCE(!sk_unhashed(sk));

        sk->sk_hash = sk_ehashfn(sk);
        head = inet_ehash_bucket(hashinfo, sk->sk_hash);
        list = &head->chain;
        lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

        spin_lock(lock);
        __sk_nulls_add_node_rcu(sk, list);
        if (osk) {
                WARN_ON(sk->sk_hash != osk->sk_hash);
                sk_nulls_del_node_init_rcu(osk);
        }
        spin_unlock(lock);
        return ret;
}
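
/*
 * Ordering note: the new socket is added before osk is removed, both
 * under the same chain lock (they hash identically, see the WARN_ON),
 * so a concurrent RCU lookup always finds at least one socket for the
 * 4-tuple; it never transiently vanishes from the table.
 */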

void __inet_hash_nolisten(struct sock *sk, struct sock *osk)
{
        inet_ehash_insert(sk, osk);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);

void __inet_hash(struct sock *sk, struct sock *osk)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct inet_listen_hashbucket *ilb;

        if (sk->sk_state != TCP_LISTEN)
                return __inet_hash_nolisten(sk, osk);

        WARN_ON(!sk_unhashed(sk));
        ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];

        spin_lock(&ilb->lock);
        __sk_nulls_add_node_rcu(sk, &ilb->head);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
        spin_unlock(&ilb->lock);
}
EXPORT_SYMBOL(__inet_hash);

void inet_hash(struct sock *sk)
{
        if (sk->sk_state != TCP_CLOSE) {
                local_bh_disable();
                __inet_hash(sk, NULL);
                local_bh_enable();
        }
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        spinlock_t *lock;
        int done;

        if (sk_unhashed(sk))
                return;

        if (sk->sk_state == TCP_LISTEN)
                lock = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)].lock;
        else
                lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

        spin_lock_bh(lock);
        done = __sk_nulls_del_node_init_rcu(sk);
        if (done)
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        spin_unlock_bh(lock);
}
EXPORT_SYMBOL_GPL(inet_unhash);

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                        struct sock *sk, u32 port_offset,
                        int (*check_established)(struct inet_timewait_death_row *,
                                struct sock *, __u16, struct inet_timewait_sock **))
{
        struct inet_hashinfo *hinfo = death_row->hashinfo;
        const unsigned short snum = inet_sk(sk)->inet_num;
        struct inet_bind_hashbucket *head;
        struct inet_bind_bucket *tb;
        int ret;
        struct net *net = sock_net(sk);

        if (!snum) {
                int i, remaining, low, high, port;
                static u32 hint;
                u32 offset = hint + port_offset;
                struct inet_timewait_sock *tw = NULL;

                inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;

                /* By starting with offset being an even number,
                 * we tend to leave about 50% of ports for other uses,
                 * like bind(0).
                 */
                offset &= ~1;

                local_bh_disable();
                for (i = 0; i < remaining; i++) {
                        port = low + (i + offset) % remaining;
                        if (inet_is_local_reserved_port(net, port))
                                continue;
                        head = &hinfo->bhash[inet_bhashfn(net, port,
                                        hinfo->bhash_size)];
                        spin_lock(&head->lock);

                        /* Does not bother with rcv_saddr checks,
                         * because the established check is already
                         * unique enough.
                         */
                        inet_bind_bucket_for_each(tb, &head->chain) {
                                if (net_eq(ib_net(tb), net) &&
                                    tb->port == port) {
                                        if (tb->fastreuse >= 0 ||
                                            tb->fastreuseport >= 0)
                                                goto next_port;
                                        WARN_ON(hlist_empty(&tb->owners));
                                        if (!check_established(death_row, sk,
                                                               port, &tw))
                                                goto ok;
                                        goto next_port;
                                }
                        }

                        tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
                                                     net, head, port);
                        if (!tb) {
                                spin_unlock(&head->lock);
                                break;
                        }
                        tb->fastreuse = -1;
                        tb->fastreuseport = -1;
                        goto ok;

next_port:
                        spin_unlock(&head->lock);
                }
                local_bh_enable();

                return -EADDRNOTAVAIL;

ok:
                hint += (i + 2) & ~1;

                /* Head lock still held and bh's disabled */
                inet_bind_hash(sk, tb, port);
                if (sk_unhashed(sk)) {
                        inet_sk(sk)->inet_sport = htons(port);
                        __inet_hash_nolisten(sk, (struct sock *)tw);
                }
                if (tw)
                        inet_twsk_bind_unhash(tw, hinfo);
                spin_unlock(&head->lock);

                if (tw)
                        inet_twsk_deschedule_put(tw);

                ret = 0;
                goto out;
        }

        head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
        tb = inet_csk(sk)->icsk_bind_hash;
        spin_lock_bh(&head->lock);
        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
                __inet_hash_nolisten(sk, NULL);
                spin_unlock_bh(&head->lock);
                return 0;
        } else {
                spin_unlock(&head->lock);
                /* No definite answer... Walk to established hash table */
                ret = check_established(death_row, sk, snum, NULL);
out:
                local_bh_enable();
                return ret;
        }
}
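
/*
 * Example search, assuming ip_local_port_range 32768..60999 (remaining
 * = 28232) and hint + port_offset == 40000: offset is already even, so
 * the first candidate is 32768 + 40000 % 28232 = 44536, then 44537,
 * 44538, ... wrapping around the range until check_established()
 * accepts a port or all candidates are exhausted (-EADDRNOTAVAIL).
 */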

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
                      struct sock *sk)
{
        u32 port_offset = 0;

        if (!inet_sk(sk)->inet_num)
                port_offset = inet_sk_port_offset(sk);
        return __inet_hash_connect(death_row, sk, port_offset,
                                   __inet_check_established);
}
EXPORT_SYMBOL_GPL(inet_hash_connect);

void inet_hashinfo_init(struct inet_hashinfo *h)
{
        int i;

        for (i = 0; i < INET_LHTABLE_SIZE; i++) {
                spin_lock_init(&h->listening_hash[i].lock);
                INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
                                      i + LISTENING_NULLS_BASE);
        }
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);

int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
        unsigned int locksz = sizeof(spinlock_t);
        unsigned int i, nblocks = 1;

        if (locksz != 0) {
                /* allocate 2 cache lines or at least one spinlock per cpu */
                nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
                nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());

                /* no more locks than number of hash buckets */
                nblocks = min(nblocks, hashinfo->ehash_mask + 1);

                hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
                                                      GFP_KERNEL | __GFP_NOWARN);
                if (!hashinfo->ehash_locks)
                        hashinfo->ehash_locks = vmalloc(nblocks * locksz);

                if (!hashinfo->ehash_locks)
                        return -ENOMEM;

                for (i = 0; i < nblocks; i++)
                        spin_lock_init(&hashinfo->ehash_locks[i]);
        }
        hashinfo->ehash_locks_mask = nblocks - 1;
        return 0;
}
EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);
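
/*
 * Sizing example: with 64-byte cache lines and 4-byte spinlocks (a
 * typical non-debug SMP build), nblocks starts at 2 * 64 / 4 = 32
 * locks per cpu; with 8 possible cpus that is 256, already a power of
 * two, and is then capped at the bucket count.  nblocks must remain a
 * power of two for the ehash_locks_mask indexing to work.
 */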