/* net/ipv4/tcp_minisocks.c */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;

struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.hashinfo	= &tcp_hashinfo,
};
EXPORT_SYMBOL_GPL(tcp_death_row);

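/* RFC 793 "first check sequence number" acceptability test: the segment
 * [seq, end_seq) is acceptable if it overlaps the receive window
 * [s_win, e_win), with the zero-length-segment corner cases handled
 * explicitly below.
 */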
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * * Main purpose of TIME-WAIT state is to close connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, tail of data) and one or more of our ACKs are lost.
 * * What is TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which results in the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow for the loss of
 *   one (or more) segments sent by the peer and our ACKs. This time may be
 *   calculated from RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * Second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to RFCs, please, read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
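			/* rcv_tsecr echoes timestamps we sent, which included
			 * our private tw_ts_offset; subtract it so the value
			 * is back on our own timestamp clock.
			 */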
			tmp_opt.rcv_tsecr	-= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row.sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, tw->tw_timeout);
		else
			inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only a reset or a bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check, that it carries a
	   newer sequence number, works at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable and, even more,
	   we may relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive RST in reply to
	   SYN-ACK), we must return the socket to time-wait state. It is
	   not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
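		/* Per RFC 1122 the new incarnation's ISN must exceed any
		 * sequence number the old one used; tw_snd_nxt + 65535 + 2
		 * jumps past a maximal unscaled window plus room for the
		 * SYN and FIN. Zero is skipped because a zero tcp_tw_isn
		 * means "no TIME-WAIT ISN" to the request processing code.
		 */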
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	bool recycle_ok = false;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tcp_remember_stamp(sk);

	tw = inet_twsk_alloc(sk, &tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
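		/* (rto << 2) - (rto >> 1) == 3.5 * RTO, the floor applied to
		 * the timeout below so that a retransmitted FIN from the
		 * peer can still be ACKed (see the TIME-WAIT comment above).
		 */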
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
					BUG();
			}
		} while (0);
#endif

		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, timeo);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

void tcp_openreq_init_rwin(struct request_sock *req,
			   struct sock *sk, struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_sock *tp = tcp_sk(sk);
	__u8 rcv_wscale;
	int mss = dst_metric_advmss(dst);

	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
		mss = tp->rx_opt.user_mss;

	/* Set this up on the first call only */
	req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
		req->window_clamp = tcp_full_space(sk);

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(tcp_full_space(sk),
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rcv_wnd,
		&req->window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		dst_metric(dst, RTAX_INITRWND));
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

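/* Pick the child's congestion control: prefer an algorithm pinned on the
 * route (RTAX_CC_ALGO), else keep what the listener selected explicitly
 * via setsockopt, else fall back to the system default.
 */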
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 0;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);
		INIT_LIST_HEAD(&newtp->tsq_node);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		tcp_enable_early_retrans(newtp);
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = treq->snt_synack;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		tcp_init_xmit_timers(newsk);
		__skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
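		/* skb here is normally the handshake-completing ACK; unlike
		 * a SYN, its advertised window is already subject to window
		 * scaling, hence the shift by snd_wscale below.
		 */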
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = 0;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;

		newtp->saved_syn = req->saved_syn;
		req->saved_syn = NULL;

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;

	BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store true stamp, but it is not required,
			 * it can be estimated (approximately)
			 * from another data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset timer after retransmitting SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however, it fails only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes. So
	   does sequence test, SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless, and rare.  The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before the attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                 and the incoming segment acknowledges something not yet
	 *                 sent (the segment carries an unacceptable ACK) ...
	 *                 a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension, and too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (!child)
		goto listen_overflow;

	inet_csk_reqsk_queue_drop(sk, req);
	inet_csk_reqsk_queue_add(sk, req, child);
	/* Warning: caller must not call reqsk_put(req);
	 * child stole last reference on it.
	 */
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

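	/* The state was snapshotted above so we can detect, after
	 * tcp_rcv_state_process() runs, whether the child just left
	 * TCP_SYN_RECV and the listening parent should be woken up.
	 */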
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);