[INET]: Move tcp_port_rover to inet_hashinfo
net/ipv4/tcp_minisocks.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:     $Id: tcp_minisocks.c,v 1.15 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
#define SYNC_INIT 1
#endif

int sysctl_tcp_tw_recycle;
int sysctl_tcp_max_tw_buckets = NR_FILE*2;

int sysctl_tcp_syncookies = SYNC_INIT;
int sysctl_tcp_abort_on_overflow;

static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);

static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
        if (seq == s_win)
                return 1;
        if (after(end_seq, s_win) && before(seq, e_win))
                return 1;
        return (seq == e_win && seq == end_seq);
}
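/* A quick worked example of the acceptance test above: with rcv_nxt = 1000
 * and rcv_wnd = 500 the window is [1000, 1500).  A segment [1100, 1200)
 * overlaps and is accepted; a bare ACK with seq == end_seq == 1000 matches
 * the first test; and a zero-length probe at exactly 1500 is caught by the
 * final test.
 */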

/* New-style handling of TIME_WAIT sockets. */

int tcp_tw_count;


/* Must be called with locally disabled BHs. */
static void tcp_timewait_kill(struct tcp_tw_bucket *tw)
{
        struct inet_bind_hashbucket *bhead;
        struct inet_bind_bucket *tb;
        /* Unlink from established hashes. */
        struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[tw->tw_hashent];

        write_lock(&ehead->lock);
        if (hlist_unhashed(&tw->tw_node)) {
                write_unlock(&ehead->lock);
                return;
        }
        __hlist_del(&tw->tw_node);
        sk_node_init(&tw->tw_node);
        write_unlock(&ehead->lock);

        /* Disassociate with bind bucket. */
        bhead = &tcp_hashinfo.bhash[inet_bhashfn(tw->tw_num, tcp_hashinfo.bhash_size)];
        spin_lock(&bhead->lock);
        tb = tw->tw_tb;
        __hlist_del(&tw->tw_bind_node);
        tw->tw_tb = NULL;
        inet_bind_bucket_destroy(tcp_hashinfo.bind_bucket_cachep, tb);
        spin_unlock(&bhead->lock);

#ifdef SOCK_REFCNT_DEBUG
        if (atomic_read(&tw->tw_refcnt) != 1) {
                printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n", tw,
                       atomic_read(&tw->tw_refcnt));
        }
#endif
        tcp_tw_put(tw);
}

/*
 * * The main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow for the loss of
 *   one (or more) segments sent by the peer and of our ACKs. This time may
 *   be calculated from RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 */
enum tcp_tw_status
tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
                           struct tcphdr *th, unsigned len)
{
        struct tcp_options_received tmp_opt;
        int paws_reject = 0;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr) >> 2) && tw->tw_ts_recent_stamp) {
                tcp_parse_options(skb, &tmp_opt, 0);

                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent       = tw->tw_ts_recent;
                        tmp_opt.ts_recent_stamp = tw->tw_ts_recent_stamp;
                        paws_reject = tcp_paws_check(&tmp_opt, th->rst);
                }
        }

        if (tw->tw_substate == TCP_FIN_WAIT2) {
                /* Just repeat all the checks of tcp_rcv_state_process() */

                /* Out of window, send ACK */
                if (paws_reject ||
                    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                   tw->tw_rcv_nxt,
                                   tw->tw_rcv_nxt + tw->tw_rcv_wnd))
                        return TCP_TW_ACK;

                if (th->rst)
                        goto kill;

                if (th->syn && !before(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt))
                        goto kill_with_rst;

                /* Dup ACK? */
                if (!after(TCP_SKB_CB(skb)->end_seq, tw->tw_rcv_nxt) ||
                    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
                        tcp_tw_put(tw);
                        return TCP_TW_SUCCESS;
                }

                /* New data or FIN. If new data arrive after half-duplex close,
                 * reset.
                 */
                if (!th->fin ||
                    TCP_SKB_CB(skb)->end_seq != tw->tw_rcv_nxt + 1) {
kill_with_rst:
                        tcp_tw_deschedule(tw);
                        tcp_tw_put(tw);
                        return TCP_TW_RST;
                }

                /* FIN arrived, enter true time-wait state. */
                tw->tw_substate = TCP_TIME_WAIT;
                tw->tw_rcv_nxt  = TCP_SKB_CB(skb)->end_seq;
                if (tmp_opt.saw_tstamp) {
                        tw->tw_ts_recent_stamp  = xtime.tv_sec;
                        tw->tw_ts_recent        = tmp_opt.rcv_tsval;
                }

                /* I am shamed, but failed to make it more elegant.
                 * Yes, it is a direct reference to IP, which is impossible
                 * to generalize to IPv6. Taking into account that IPv6
                 * does not understand recycling in any case, it is not
                 * a big problem in practice. --ANK */
                if (tw->tw_family == AF_INET &&
                    sysctl_tcp_tw_recycle && tw->tw_ts_recent_stamp &&
                    tcp_v4_tw_remember_stamp(tw))
                        tcp_tw_schedule(tw, tw->tw_timeout);
                else
                        tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
                return TCP_TW_ACK;
        }

        /*
         * Now real TIME-WAIT state.
         *
         * RFC 1122:
         * "When a connection is [...] on TIME-WAIT state [...]
         * [a TCP] MAY accept a new SYN from the remote TCP to
         * reopen the connection directly, if it:
         *
         * (1)  assigns its initial sequence number for the new
         *      connection to be larger than the largest sequence
         *      number it used on the previous connection incarnation,
         *      and
         *
         * (2)  returns to TIME-WAIT state if the SYN turns out
         *      to be an old duplicate".
         */

        if (!paws_reject &&
            (TCP_SKB_CB(skb)->seq == tw->tw_rcv_nxt &&
             (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
                /* In window segment, it may be only reset or bare ack. */

                if (th->rst) {
                        /* This is TIME_WAIT assassination, in two flavors.
                         * Oh well... nobody has a sufficient solution to this
                         * protocol bug yet.
                         */
                        if (sysctl_tcp_rfc1337 == 0) {
kill:
                                tcp_tw_deschedule(tw);
                                tcp_tw_put(tw);
                                return TCP_TW_SUCCESS;
                        }
                }
                tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

                if (tmp_opt.saw_tstamp) {
                        tw->tw_ts_recent        = tmp_opt.rcv_tsval;
                        tw->tw_ts_recent_stamp  = xtime.tv_sec;
                }

                tcp_tw_put(tw);
                return TCP_TW_SUCCESS;
        }

        /* Out of window segment.

           All the segments are ACKed immediately.

           The only exception is a new SYN. We accept it, if it is
           not an old duplicate and we are not in danger of being killed
           by delayed old duplicates. The RFC check, that it carries a
           newer sequence number, works only at rates <40Mbit/sec.
           However, if paws works, it is reliable AND even more,
           we even may relax the silly seq space cutoff.

           RED-PEN: we violate the main RFC requirement: if this SYN turns
           out to be an old duplicate (i.e. we receive RST in reply to
           SYN-ACK), we must return the socket to time-wait state. It is
           not good, but not fatal yet.
         */

        if (th->syn && !th->rst && !th->ack && !paws_reject &&
            (after(TCP_SKB_CB(skb)->seq, tw->tw_rcv_nxt) ||
             (tmp_opt.saw_tstamp && (s32)(tw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
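                /* Pick an ISN provably above anything the old incarnation
                 * could have sent: snd_nxt plus the maximal 64K window.
                 * It is handed over in when, where a value of zero means
                 * "no ISN chosen", hence the bump below.
                 */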
                u32 isn = tw->tw_snd_nxt + 65535 + 2;
                if (isn == 0)
                        isn++;
                TCP_SKB_CB(skb)->when = isn;
                return TCP_TW_SYN;
        }

        if (paws_reject)
                NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);

        if (!th->rst) {
                /* In this case we must reset the TIMEWAIT timer.
                 *
                 * If it is an ACKless SYN it may be both an old duplicate
                 * and a new good SYN with random sequence number <rcv_nxt.
                 * Do not reschedule in the last case.
                 */
                if (paws_reject || th->ack)
                        tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

                /* Send ACK. Note, we do not put the bucket,
                 * it will be released by caller.
                 */
                return TCP_TW_ACK;
        }
        tcp_tw_put(tw);
        return TCP_TW_SUCCESS;
}

/* Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the
 * relevant info into it from the SK, and mess with hash chains
 * and list linkage.
 */
static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
{
        const struct inet_sock *inet = inet_sk(sk);
        struct inet_ehash_bucket *ehead = &tcp_hashinfo.ehash[sk->sk_hashent];
        struct inet_bind_hashbucket *bhead;
        /* Step 1: Put TW into bind hash. Original socket stays there too.
           Note, that any socket with inet->num != 0 MUST be bound in
           binding cache, even if it is closed.
         */
        bhead = &tcp_hashinfo.bhash[inet_bhashfn(inet->num, tcp_hashinfo.bhash_size)];
        spin_lock(&bhead->lock);
        tw->tw_tb = inet->bind_hash;
        BUG_TRAP(inet->bind_hash);
        tw_add_bind_node(tw, &tw->tw_tb->owners);
        spin_unlock(&bhead->lock);

        write_lock(&ehead->lock);

        /* Step 2: Remove SK from established hash. */
        if (__sk_del_node_init(sk))
                sock_prot_dec_use(sk->sk_prot);

        /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
        tw_add_node(tw, &(ehead + tcp_hashinfo.ehash_size)->chain);
        atomic_inc(&tw->tw_refcnt);

        write_unlock(&ehead->lock);
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
        struct tcp_tw_bucket *tw = NULL;
        struct tcp_sock *tp = tcp_sk(sk);
        int recycle_ok = 0;

        if (sysctl_tcp_tw_recycle && tp->rx_opt.ts_recent_stamp)
                recycle_ok = tp->af_specific->remember_stamp(sk);

        if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
                tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);

        if (tw != NULL) {
                struct inet_sock *inet = inet_sk(sk);
                int rto = (tp->rto << 2) - (tp->rto >> 1);
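                /* The shifts above compute 3.5*RTO: (rto << 2) is 4*RTO and
                 * (rto >> 1) is RTO/2; see the rationale spelled out in the
                 * comment in tcp_tw_schedule().
                 */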

                /* Give us an identity. */
                tw->tw_daddr            = inet->daddr;
                tw->tw_rcv_saddr        = inet->rcv_saddr;
                tw->tw_bound_dev_if     = sk->sk_bound_dev_if;
                tw->tw_num              = inet->num;
                tw->tw_state            = TCP_TIME_WAIT;
                tw->tw_substate         = state;
                tw->tw_sport            = inet->sport;
                tw->tw_dport            = inet->dport;
                tw->tw_family           = sk->sk_family;
                tw->tw_reuse            = sk->sk_reuse;
                tw->tw_rcv_wscale       = tp->rx_opt.rcv_wscale;
                atomic_set(&tw->tw_refcnt, 1);

                tw->tw_hashent          = sk->sk_hashent;
                tw->tw_rcv_nxt          = tp->rcv_nxt;
                tw->tw_snd_nxt          = tp->snd_nxt;
                tw->tw_rcv_wnd          = tcp_receive_window(tp);
                tw->tw_ts_recent        = tp->rx_opt.ts_recent;
                tw->tw_ts_recent_stamp  = tp->rx_opt.ts_recent_stamp;
                tw_dead_node_init(tw);

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);

                        ipv6_addr_copy(&tw->tw_v6_daddr, &np->daddr);
                        ipv6_addr_copy(&tw->tw_v6_rcv_saddr, &np->rcv_saddr);
                        tw->tw_v6_ipv6only = np->ipv6only;
                } else {
                        memset(&tw->tw_v6_daddr, 0, sizeof(tw->tw_v6_daddr));
                        memset(&tw->tw_v6_rcv_saddr, 0, sizeof(tw->tw_v6_rcv_saddr));
                        tw->tw_v6_ipv6only = 0;
                }
#endif
                /* Linkage updates. */
                __tcp_tw_hashdance(sk, tw);

                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;

                if (recycle_ok) {
                        tw->tw_timeout = rto;
                } else {
                        tw->tw_timeout = TCP_TIMEWAIT_LEN;
                        if (state == TCP_TIME_WAIT)
                                timeo = TCP_TIMEWAIT_LEN;
                }

                tcp_tw_schedule(tw, timeo);
                tcp_tw_put(tw);
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
                if (net_ratelimit())
                        printk(KERN_INFO "TCP: time wait bucket table overflow\n");
        }

        tcp_update_metrics(sk);
        tcp_done(sk);
}

/* Kill off TIME_WAIT sockets once their lifetime has expired. */
static int tcp_tw_death_row_slot;

static void tcp_twkill(unsigned long);

/* TIME_WAIT reaping mechanism. */
#define TCP_TWKILL_SLOTS        8       /* Please keep this a power of 2. */
#define TCP_TWKILL_PERIOD       (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
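/* With TCP_TIMEWAIT_LEN at 60*HZ this works out to 7.5 seconds per slot;
 * that is the expiry granularity of the slow death-row path below.
 */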

#define TCP_TWKILL_QUOTA        100

static struct hlist_head tcp_tw_death_row[TCP_TWKILL_SLOTS];
static DEFINE_SPINLOCK(tw_death_lock);
static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
static void twkill_work(void *);
static DECLARE_WORK(tcp_twkill_work, twkill_work, NULL);
static u32 twkill_thread_slots;

/* Returns non-zero if quota exceeded. */
static int tcp_do_twkill_work(int slot, unsigned int quota)
{
        struct tcp_tw_bucket *tw;
        struct hlist_node *node;
        unsigned int killed;
        int ret;

        /* NOTE: compare this to the previous version where the lock
         * was released after detaching the chain. It was racy,
         * because tw buckets are scheduled in a non-serialized context
         * in 2.3 (with netfilter), and with softnet it is common, because
         * soft irqs are not sequenced.
         */
        killed = 0;
        ret = 0;
rescan:
        tw_for_each_inmate(tw, node, &tcp_tw_death_row[slot]) {
                __tw_del_dead_node(tw);
                spin_unlock(&tw_death_lock);
                tcp_timewait_kill(tw);
                tcp_tw_put(tw);
                killed++;
                spin_lock(&tw_death_lock);
                if (killed > quota) {
                        ret = 1;
                        break;
                }

                /* While we dropped tw_death_lock, another cpu may have
                 * killed off the next TW bucket in the list, therefore
                 * do a fresh re-read of the hlist head node with the
                 * lock reacquired.  We still use the hlist traversal
                 * macro in order to get the prefetches.
                 */
                goto rescan;
        }

        tcp_tw_count -= killed;
        NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);

        return ret;
}

static void tcp_twkill(unsigned long dummy)
{
        int need_timer, ret;

        spin_lock(&tw_death_lock);

        if (tcp_tw_count == 0)
                goto out;

        need_timer = 0;
        ret = tcp_do_twkill_work(tcp_tw_death_row_slot, TCP_TWKILL_QUOTA);
        if (ret) {
                twkill_thread_slots |= (1 << tcp_tw_death_row_slot);
                mb();
                schedule_work(&tcp_twkill_work);
                need_timer = 1;
        } else {
                /* We purged the entire slot, anything left? */
                if (tcp_tw_count)
                        need_timer = 1;
        }
        tcp_tw_death_row_slot =
                ((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));
        if (need_timer)
                mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
out:
        spin_unlock(&tw_death_lock);
}

extern void twkill_slots_invalid(void);
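/* twkill_slots_invalid() is deliberately left without a definition: if the
 * size check below ever becomes true, the call survives compilation and the
 * build fails at link time, a hand-rolled compile-time assertion.
 */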

static void twkill_work(void *dummy)
{
        int i;

        if ((TCP_TWKILL_SLOTS - 1) > (sizeof(twkill_thread_slots) * 8))
                twkill_slots_invalid();

        while (twkill_thread_slots) {
                spin_lock_bh(&tw_death_lock);
                for (i = 0; i < TCP_TWKILL_SLOTS; i++) {
                        if (!(twkill_thread_slots & (1 << i)))
                                continue;

                        while (tcp_do_twkill_work(i, TCP_TWKILL_QUOTA) != 0) {
                                if (need_resched()) {
                                        spin_unlock_bh(&tw_death_lock);
                                        schedule();
                                        spin_lock_bh(&tw_death_lock);
                                }
                        }

                        twkill_thread_slots &= ~(1 << i);
                }
                spin_unlock_bh(&tw_death_lock);
        }
}

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
{
        spin_lock(&tw_death_lock);
        if (tw_del_dead_node(tw)) {
                tcp_tw_put(tw);
                if (--tcp_tw_count == 0)
                        del_timer(&tcp_tw_timer);
        }
        spin_unlock(&tw_death_lock);
        tcp_timewait_kill(tw);
}

/* Short-time timewait calendar */

static int tcp_twcal_hand = -1;
static int tcp_twcal_jiffie;
static void tcp_twcal_tick(unsigned long);
static struct timer_list tcp_twcal_timer =
                TIMER_INITIALIZER(tcp_twcal_tick, 0, 0);
static struct hlist_head tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];

static void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
{
        struct hlist_head *list;
        int slot;

        /* timeout := RTO * 3.5
         *
         * 3.5 = 1+2+0.5 to wait for two retransmits.
         *
         * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
         * our ACK acking that FIN can be lost. If N subsequent retransmitted
         * FINs (or previous segments) are lost (probability of such event
         * is p^(N+1), where p is the probability to lose a single packet and
         * the time to detect the loss is about RTO*(2^N - 1) with exponential
         * backoff). Normal timewait length is calculated so that we
         * waited at least for one retransmitted FIN (maximal RTO is 120sec).
         * [ BTW Linux, following BSD, violates this requirement by waiting
         *   only for 60sec; we should wait at least for 240 secs.
         *   Well, 240 consumes too much of resources 8)
         * ]
         * This interval is not reduced to catch old duplicates and
         * responses to our wandering segments living for two MSLs.
         * However, if we use PAWS to detect
         * old duplicates, we can reduce the interval to bounds required
         * by RTO, rather than MSL. So, if peer understands PAWS, we
         * kill tw bucket after 3.5*RTO (it is important that this number
         * is greater than TS tick!) and detect old duplicates with help
         * of PAWS.
         */
        slot = (timeo + (1 << TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;
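        /* The computation above rounds the timeout up to whole recycle
         * ticks: each slot of the short-time calendar spans
         * (1 << TCP_TW_RECYCLE_TICK) jiffies.
         */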

        spin_lock(&tw_death_lock);

        /* Unlink it, if it was scheduled */
        if (tw_del_dead_node(tw))
                tcp_tw_count--;
        else
                atomic_inc(&tw->tw_refcnt);

        if (slot >= TCP_TW_RECYCLE_SLOTS) {
                /* Schedule to slow timer */
                if (timeo >= TCP_TIMEWAIT_LEN) {
                        slot = TCP_TWKILL_SLOTS - 1;
                } else {
                        slot = (timeo + TCP_TWKILL_PERIOD - 1) / TCP_TWKILL_PERIOD;
                        if (slot >= TCP_TWKILL_SLOTS)
                                slot = TCP_TWKILL_SLOTS - 1;
                }
                tw->tw_ttd = jiffies + timeo;
                slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
                list = &tcp_tw_death_row[slot];
        } else {
                tw->tw_ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);

                if (tcp_twcal_hand < 0) {
                        tcp_twcal_hand = 0;
                        tcp_twcal_jiffie = jiffies;
                        tcp_twcal_timer.expires = tcp_twcal_jiffie + (slot << TCP_TW_RECYCLE_TICK);
                        add_timer(&tcp_twcal_timer);
                } else {
                        if (time_after(tcp_twcal_timer.expires, jiffies + (slot << TCP_TW_RECYCLE_TICK)))
                                mod_timer(&tcp_twcal_timer, jiffies + (slot << TCP_TW_RECYCLE_TICK));
                        slot = (tcp_twcal_hand + slot) & (TCP_TW_RECYCLE_SLOTS - 1);
                }
                list = &tcp_twcal_row[slot];
        }

        hlist_add_head(&tw->tw_death_node, list);

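        /* The slow timer runs only while buckets exist: the first bucket
         * arms it here, and the reapers delete it or stop re-arming it
         * once tcp_tw_count falls back to zero.
         */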
        if (tcp_tw_count++ == 0)
                mod_timer(&tcp_tw_timer, jiffies + TCP_TWKILL_PERIOD);
        spin_unlock(&tw_death_lock);
}

void tcp_twcal_tick(unsigned long dummy)
{
        int n, slot;
        unsigned long j;
        unsigned long now = jiffies;
        int killed = 0;
        int adv = 0;

        spin_lock(&tw_death_lock);
        if (tcp_twcal_hand < 0)
                goto out;

        slot = tcp_twcal_hand;
        j = tcp_twcal_jiffie;

        for (n = 0; n < TCP_TW_RECYCLE_SLOTS; n++) {
                if (time_before_eq(j, now)) {
                        struct hlist_node *node, *safe;
                        struct tcp_tw_bucket *tw;

                        tw_for_each_inmate_safe(tw, node, safe,
                                                &tcp_twcal_row[slot]) {
                                __tw_del_dead_node(tw);
                                tcp_timewait_kill(tw);
                                tcp_tw_put(tw);
                                killed++;
                        }
                } else {
                        if (!adv) {
                                adv = 1;
                                tcp_twcal_jiffie = j;
                                tcp_twcal_hand = slot;
                        }

                        if (!hlist_empty(&tcp_twcal_row[slot])) {
                                mod_timer(&tcp_twcal_timer, j);
                                goto out;
                        }
                }
                j += (1 << TCP_TW_RECYCLE_TICK);
                slot = (slot + 1) & (TCP_TW_RECYCLE_SLOTS - 1);
        }
        tcp_twcal_hand = -1;

out:
        if ((tcp_tw_count -= killed) == 0)
                del_timer(&tcp_tw_timer);
        NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
        spin_unlock(&tw_death_lock);
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
        /* allocate the newsk from the same slab of the master sock,
         * if not, at sk_free time we'll try to free it from the wrong
         * slabcache (i.e. is it TCPv4 or v6?), this is handled thru sk->sk_prot -acme */
        struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0);

        if (newsk != NULL) {
                struct inet_request_sock *ireq = inet_rsk(req);
                struct tcp_request_sock *treq = tcp_rsk(req);
                struct inet_sock *newinet = inet_sk(newsk);
                struct tcp_sock *newtp;
                struct sk_filter *filter;

                memcpy(newsk, sk, sizeof(struct tcp_sock));
                newsk->sk_state = TCP_SYN_RECV;

                /* SANITY */
                sk_node_init(&newsk->sk_node);
                newinet->bind_hash = NULL;

                /* Clone the TCP header template */
                newinet->dport = ireq->rmt_port;

                sock_lock_init(newsk);
                bh_lock_sock(newsk);

                rwlock_init(&newsk->sk_dst_lock);
                newsk->sk_dst_cache = NULL;
                atomic_set(&newsk->sk_rmem_alloc, 0);
                skb_queue_head_init(&newsk->sk_receive_queue);
                atomic_set(&newsk->sk_wmem_alloc, 0);
                skb_queue_head_init(&newsk->sk_write_queue);
                atomic_set(&newsk->sk_omem_alloc, 0);
                newsk->sk_wmem_queued = 0;
                newsk->sk_forward_alloc = 0;

                sock_reset_flag(newsk, SOCK_DONE);
                newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
                newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
                newsk->sk_send_head = NULL;
                rwlock_init(&newsk->sk_callback_lock);
                skb_queue_head_init(&newsk->sk_error_queue);
                newsk->sk_write_space = sk_stream_write_space;

                if ((filter = newsk->sk_filter) != NULL)
                        sk_filter_charge(newsk, filter);

                if (unlikely(xfrm_sk_clone_policy(newsk))) {
                        /* It is still raw copy of parent, so invalidate
                         * destructor and make plain sk_free() */
                        newsk->sk_destruct = NULL;
                        sk_free(newsk);
                        return NULL;
                }

                /* Now setup tcp_sock */
                newtp = tcp_sk(newsk);
                newtp->pred_flags = 0;
                newtp->rcv_nxt = treq->rcv_isn + 1;
                newtp->snd_nxt = treq->snt_isn + 1;
                newtp->snd_una = treq->snt_isn + 1;
                newtp->snd_sml = treq->snt_isn + 1;

                tcp_prequeue_init(newtp);

                tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);

                newtp->retransmits = 0;
                newtp->backoff = 0;
                newtp->srtt = 0;
                newtp->mdev = TCP_TIMEOUT_INIT;
                newtp->rto = TCP_TIMEOUT_INIT;

                newtp->packets_out = 0;
                newtp->left_out = 0;
                newtp->retrans_out = 0;
                newtp->sacked_out = 0;
                newtp->fackets_out = 0;
                newtp->snd_ssthresh = 0x7fffffff;

                /* So many TCP implementations out there (incorrectly) count the
                 * initial SYN frame in their delayed-ACK and congestion control
                 * algorithms that we must have the following bandaid to talk
                 * efficiently to them.  -DaveM
                 */
                newtp->snd_cwnd = 2;
                newtp->snd_cwnd_cnt = 0;

                newtp->frto_counter = 0;
                newtp->frto_highmark = 0;

                newtp->ca_ops = &tcp_reno;

                tcp_set_ca_state(newtp, TCP_CA_Open);
                tcp_init_xmit_timers(newsk);
                skb_queue_head_init(&newtp->out_of_order_queue);
                newtp->rcv_wup = treq->rcv_isn + 1;
                newtp->write_seq = treq->snt_isn + 1;
                newtp->pushed_seq = newtp->write_seq;
                newtp->copied_seq = treq->rcv_isn + 1;

                newtp->rx_opt.saw_tstamp = 0;

                newtp->rx_opt.dsack = 0;
                newtp->rx_opt.eff_sacks = 0;

                newtp->probes_out = 0;
                newtp->rx_opt.num_sacks = 0;
                newtp->urg_data = 0;
                /* Deinitialize accept_queue to trap illegal accesses. */
                memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue));

                /* Back to base struct sock members. */
                newsk->sk_err = 0;
                newsk->sk_priority = 0;
                atomic_set(&newsk->sk_refcnt, 2);

                /*
                 * Increment the counter in the same struct proto as the master
                 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
                 * is the same as sk->sk_prot->socks, as this field was copied
                 * with memcpy), same rationale as the first comment in this
                 * function.
                 *
                 * This _changes_ the previous behaviour, where
                 * tcp_create_openreq_child always was incrementing the
                 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
                 * to be taken into account in all callers. -acme
                 */
                sk_refcnt_debug_inc(newsk);

                atomic_inc(&tcp_sockets_allocated);

                if (sock_flag(newsk, SOCK_KEEPOPEN))
                        tcp_reset_keepalive_timer(newsk,
                                                  keepalive_time_when(newtp));
                newsk->sk_socket = NULL;
                newsk->sk_sleep = NULL;

                newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
                if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
                        if (sysctl_tcp_fack)
                                newtp->rx_opt.sack_ok |= 2;
                }
                newtp->window_clamp = req->window_clamp;
                newtp->rcv_ssthresh = req->rcv_wnd;
                newtp->rcv_wnd = req->rcv_wnd;
                newtp->rx_opt.wscale_ok = ireq->wscale_ok;
                if (newtp->rx_opt.wscale_ok) {
                        newtp->rx_opt.snd_wscale = ireq->snd_wscale;
                        newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
                } else {
                        newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
                        newtp->window_clamp = min(newtp->window_clamp, 65535U);
                }
                newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->rx_opt.snd_wscale;
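                /* The skb here is normally the final ACK of the handshake,
                 * so the window field used above is already subject to the
                 * scale negotiated earlier (a SYN itself never carries a
                 * scaled window).
                 */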
                newtp->max_window = newtp->snd_wnd;

                if (newtp->rx_opt.tstamp_ok) {
                        newtp->rx_opt.ts_recent = req->ts_recent;
                        newtp->rx_opt.ts_recent_stamp = xtime.tv_sec;
                        newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
                } else {
                        newtp->rx_opt.ts_recent_stamp = 0;
                        newtp->tcp_header_len = sizeof(struct tcphdr);
                }
                if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
                        newtp->ack.last_seg_size = skb->len - newtp->tcp_header_len;
                newtp->rx_opt.mss_clamp = req->mss;
                TCP_ECN_openreq_child(newtp, req);
                if (newtp->ecn_flags & TCP_ECN_OK)
                        sock_set_flag(newsk, SOCK_NO_LARGESEND);

                TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS);
        }
        return newsk;
}

/*
 *      Process an incoming packet for SYN_RECV sockets represented
 *      as a request_sock.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
                           struct request_sock **prev)
{
        struct tcphdr *th = skb->h.th;
        struct tcp_sock *tp = tcp_sk(sk);
        u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        int paws_reject = 0;
        struct tcp_options_received tmp_opt;
        struct sock *child;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr) >> 2)) {
                tcp_parse_options(skb, &tmp_opt, 0);

                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent = req->ts_recent;
                        /* We do not store the true stamp, but it is not required,
                         * it can be estimated (approximately)
                         * from other data.
                         */
                        tmp_opt.ts_recent_stamp = xtime.tv_sec - ((TCP_TIMEOUT_INIT/HZ) << req->retrans);
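                        /* E.g. with TCP_TIMEOUT_INIT at its usual 3*HZ and
                         * two retransmits so far, the peer's stamp is taken
                         * to be roughly 3 << 2 = 12 seconds old.
                         */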
                        paws_reject = tcp_paws_check(&tmp_opt, th->rst);
                }
        }

        /* Check for pure retransmitted SYN. */
        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
            flg == TCP_FLAG_SYN &&
            !paws_reject) {
                /*
                 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
                 * this case on figure 6 and figure 8, but the formal
                 * protocol description says NOTHING.
                 * To be more exact, it says that we should send ACK,
                 * because this segment (at least, if it has no data)
                 * is out of window.
                 *
                 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
                 * describe SYN-RECV state. All the description
                 * is wrong, we cannot trust it and should
                 * rely only on common sense and implementation
                 * experience.
                 *
                 * Enforce "SYN-ACK" according to figure 8, figure 6
                 * of RFC793, fixed by RFC1122.
                 */
                req->rsk_ops->rtx_syn_ack(sk, req, NULL);
                return NULL;
        }

        /* Further reproduces section "SEGMENT ARRIVES"
           for state SYN-RECEIVED of RFC793.
           It is broken, however, in only one respect: it fails
           when SYNs are crossed.

           You would think that SYN crossing is impossible here, since
           we should have a SYN_SENT socket (from connect()) on our end,
           but this is not true if the crossed SYNs were sent to both
           ends by a malicious third party.  We must defend against this,
           and to do that we first verify the ACK (as per RFC793, page
           36) and reset if it is invalid.  Is this a true full defense?
           To convince ourselves, let us consider a way in which the ACK
           test can still pass in this 'malicious crossed SYNs' case.
           Malicious sender sends identical SYNs (and thus identical sequence
           numbers) to both A and B:

           A: gets SYN, seq=7
           B: gets SYN, seq=7

           By our good fortune, both A and B select the same initial
           send sequence number of seven :-)

           A: sends SYN|ACK, seq=7, ack_seq=8
           B: sends SYN|ACK, seq=7, ack_seq=8

           So we are now A eating this SYN|ACK, ACK test passes.  So
           does sequence test, SYN is truncated, and thus we consider
           it a bare ACK.

           If tp->defer_accept, we silently drop this bare ACK.  Otherwise,
           we create an established connection.  Both ends (listening sockets)
           accept the new incoming connection and try to talk to each other. 8-)

           Note: This case is both harmless, and rare.  The possibility is about
           the same as us discovering intelligent life on another planet tomorrow.

           But generally, we should (RFC lies!) accept ACK
           from SYNACK both here and in tcp_rcv_state_process().
           tcp_rcv_state_process() does not, hence, we do not too.

           Note that the case is absolutely generic:
           we cannot optimize anything here without
           violating protocol. All the checks must be made
           before attempt to create socket.
         */

        /* RFC793 page 36: "If the connection is in any non-synchronized state ...
         * and the incoming segment acknowledges something not yet
         * sent (the segment carries an unacceptable ACK) ...
         * a reset is sent."
         *
         * Invalid ACK: reset will be sent by listening socket
         */
        if ((flg & TCP_FLAG_ACK) &&
            (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
                return sk;

        /* Also, it would not be a bad idea to check rcv_tsecr, which
         * is essentially an ACK extension; too early or too late values
         * should cause a reset in unsynchronized states.
         */

        /* RFC793: "first check sequence number". */

        if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                          tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
                /* Out of window: send ACK and drop. */
                if (!(flg & TCP_FLAG_RST))
                        req->rsk_ops->send_ack(skb, req);
                if (paws_reject)
                        NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
                return NULL;
        }

        /* In sequence, PAWS is OK. */

        if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
                req->ts_recent = tmp_opt.rcv_tsval;

        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
                /* Truncate SYN, it is out of window starting
                   at tcp_rsk(req)->rcv_isn + 1. */
                flg &= ~TCP_FLAG_SYN;
        }

        /* RFC793: "second check the RST bit" and
         *         "fourth, check the SYN bit"
         */
        if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
                goto embryonic_reset;

        /* ACK sequence verified above, just make sure ACK is
         * set.  If ACK not set, just silently drop the packet.
         */
        if (!(flg & TCP_FLAG_ACK))
                return NULL;

        /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
        if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
                inet_rsk(req)->acked = 1;
                return NULL;
        }

        /* OK, ACK is valid, create big socket and
         * feed this segment to it. It will repeat all
         * the tests. THIS SEGMENT MUST MOVE SOCKET TO
         * ESTABLISHED STATE. If it will be dropped after
         * socket is created, wait for troubles.
         */
        child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
        if (child == NULL)
                goto listen_overflow;

        tcp_synq_unlink(tp, req, prev);
        tcp_synq_removed(sk, req);

        tcp_acceptq_queue(sk, req, child);
        return child;

 listen_overflow:
        if (!sysctl_tcp_abort_on_overflow) {
                inet_rsk(req)->acked = 1;
                return NULL;
        }

 embryonic_reset:
        NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
        if (!(flg & TCP_FLAG_RST))
                req->rsk_ops->send_reset(skb);

        tcp_synq_drop(sk, req, prev);
        return NULL;
}

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
                      struct sk_buff *skb)
{
        int ret = 0;
        int state = child->sk_state;

        if (!sock_owned_by_user(child)) {
                ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

                /* Wakeup parent, send SIGIO */
                if (state == TCP_SYN_RECV && child->sk_state != state)
                        parent->sk_data_ready(parent, 0);
        } else {
                /* Alas, it is possible again, because we do lookup
                 * in the main socket hash table and the lock on the
                 * listening socket no longer protects us.
                 */
                sk_add_backlog(child, skb);
        }

        bh_unlock_sock(child);
        sock_put(child);
        return ret;
}

EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);
EXPORT_SYMBOL(tcp_tw_deschedule);