/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller		: New socket lookup architecture.
 *				  This code is dedicated to John Dyson.
 *	David S. Miller		: Change semantics of established hash,
 *				  half is devoted to TIME_WAIT sockets
 *				  and the rest go in the other half.
 *	Andi Kleen		: Add support for syncookies and fixed
 *				  some bugs: ip options weren't passed to
 *				  the TCP layer, missed a check for an
 *				  ACK bit.
 *	Andi Kleen		: Implemented fast path mtu discovery.
 *				  Fixed many serious bugs in the
 *				  request_sock handling and moved
 *				  most of it into the af independent code.
 *				  Added tail drop and some other bugfixes.
 *				  Added new listen semantics.
 *	Mike McLagan		: Routing by source
 *	Juan Jose Ciarlante	: ip_dynaddr bits
 *	Andi Kleen		: various fixes.
 *	Vitaly E. Lavrov	: Transparent proxy revived after year
 *				  coma.
 *	Andi Kleen		: Fix new listen.
 *	Andi Kleen		: Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov	  allow both IPv4 and IPv6 sockets to bind
 *				  a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);


#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

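/* Note: the initial sequence number is derived from the connection
 * 4-tuple via secure_tcp_sequence_number(), so each flow gets an
 * unpredictable, per-flow ISN (see net/core/secure_seq.c).
 */
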
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

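/* Worked example of the sequence-space bump above: if the previous
 * incarnation ended with tw_snd_nxt == 1000, the reused connection
 * starts at write_seq == 1000 + 65535 + 2, beyond anything the peer
 * can still hold from the old connection (assuming windows <= 64KB).
 */
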
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

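/* Userspace reaches tcp_v4_connect() through the ordinary socket API;
 * a minimal caller would look like this (illustrative sketch only,
 * error handling omitted, documentation address used):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = { .sin_family = AF_INET,
 *				   .sin_port   = htons(80) };
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 */
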
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
static void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
316
1da177e4
LT
317/*
318 * This routine is called by the ICMP module when it gets some
319 * sort of error condition. If err < 0 then the socket should
320 * be closed and the error returned to the user. If err > 0
321 * it's just the icmp type << 8 | icmp code. After adjustment
322 * header points to the first 8 bytes of the tcp header. We need
323 * to find the appropriate port.
324 *
325 * The locking strategy used here is very "optimistic". When
326 * someone else accesses the socket the ICMP is just dropped
327 * and for some paths there is no check at all.
328 * A more general error queue to queue errors for later handling
329 * is probably better.
330 *
331 */
332
4d1a2d9e 333void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
1da177e4 334{
b71d1d42 335 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
4d1a2d9e 336 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
f1ecd5d9 337 struct inet_connection_sock *icsk;
1da177e4
LT
338 struct tcp_sock *tp;
339 struct inet_sock *inet;
4d1a2d9e
DL
340 const int type = icmp_hdr(icmp_skb)->type;
341 const int code = icmp_hdr(icmp_skb)->code;
1da177e4 342 struct sock *sk;
f1ecd5d9 343 struct sk_buff *skb;
168a8f58 344 struct request_sock *req;
1da177e4 345 __u32 seq;
f1ecd5d9 346 __u32 remaining;
1da177e4 347 int err;
4d1a2d9e 348 struct net *net = dev_net(icmp_skb->dev);
1da177e4 349
4d1a2d9e 350 if (icmp_skb->len < (iph->ihl << 2) + 8) {
dcfc23ca 351 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
1da177e4
LT
352 return;
353 }
354
fd54d716 355 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
4d1a2d9e 356 iph->saddr, th->source, inet_iif(icmp_skb));
1da177e4 357 if (!sk) {
dcfc23ca 358 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
1da177e4
LT
359 return;
360 }
361 if (sk->sk_state == TCP_TIME_WAIT) {
9469c7b4 362 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
363 return;
364 }
365
366 bh_lock_sock(sk);
367 /* If too many ICMPs get dropped on busy
368 * servers this needs to be solved differently.
563d34d0
ED
369 * We do take care of PMTU discovery (RFC1191) special case :
370 * we can receive locally generated ICMP messages while socket is held.
1da177e4 371 */
b74aa930
ED
372 if (sock_owned_by_user(sk)) {
373 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
374 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
375 }
1da177e4
LT
376 if (sk->sk_state == TCP_CLOSE)
377 goto out;
378
97e3ecd1 379 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
380 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
381 goto out;
382 }
383
f1ecd5d9 384 icsk = inet_csk(sk);
1da177e4 385 tp = tcp_sk(sk);
168a8f58 386 req = tp->fastopen_rsk;
1da177e4
LT
387 seq = ntohl(th->seq);
388 if (sk->sk_state != TCP_LISTEN &&
168a8f58
JC
389 !between(seq, tp->snd_una, tp->snd_nxt) &&
390 (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
391 /* For a Fast Open socket, allow seq to be snt_isn. */
de0744af 392 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
393 goto out;
394 }
395
396 switch (type) {
55be7a9c
DM
397 case ICMP_REDIRECT:
398 do_redirect(icmp_skb, sk);
399 goto out;
1da177e4
LT
400 case ICMP_SOURCE_QUENCH:
401 /* Just silently ignore these. */
402 goto out;
403 case ICMP_PARAMETERPROB:
404 err = EPROTO;
405 break;
406 case ICMP_DEST_UNREACH:
407 if (code > NR_ICMP_UNREACH)
408 goto out;
409
410 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
563d34d0 411 tp->mtu_info = info;
144d56e9 412 if (!sock_owned_by_user(sk)) {
563d34d0 413 tcp_v4_mtu_reduced(sk);
144d56e9
ED
414 } else {
415 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
416 sock_hold(sk);
417 }
1da177e4
LT
418 goto out;
419 }
420
421 err = icmp_err_convert[code].errno;
f1ecd5d9
DL
422 /* check if icmp_skb allows revert of backoff
423 * (see draft-zimmermann-tcp-lcd) */
424 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
425 break;
426 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
427 !icsk->icsk_backoff)
428 break;
429
168a8f58
JC
430 /* XXX (TFO) - revisit the following logic for TFO */
431
8f49c270
DM
432 if (sock_owned_by_user(sk))
433 break;
434
f1ecd5d9 435 icsk->icsk_backoff--;
9ad7c049
JC
436 inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
437 TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
f1ecd5d9
DL
438 tcp_bound_rto(sk);
439
440 skb = tcp_write_queue_head(sk);
441 BUG_ON(!skb);
442
443 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
444 tcp_time_stamp - TCP_SKB_CB(skb)->when);
445
446 if (remaining) {
447 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
448 remaining, TCP_RTO_MAX);
f1ecd5d9
DL
449 } else {
450 /* RTO revert clocked out retransmission.
451 * Will retransmit now */
452 tcp_retransmit_timer(sk);
453 }
454
1da177e4
LT
455 break;
456 case ICMP_TIME_EXCEEDED:
457 err = EHOSTUNREACH;
458 break;
459 default:
460 goto out;
461 }
462
168a8f58
JC
463 /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
464 * than following the TCP_SYN_RECV case and closing the socket,
465 * we ignore the ICMP error and keep trying like a fully established
466 * socket. Is this the right thing to do?
467 */
468 if (req && req->sk == NULL)
469 goto out;
470
1da177e4 471 switch (sk->sk_state) {
60236fdd 472 struct request_sock *req, **prev;
1da177e4
LT
473 case TCP_LISTEN:
474 if (sock_owned_by_user(sk))
475 goto out;
476
463c84b9
ACM
477 req = inet_csk_search_req(sk, &prev, th->dest,
478 iph->daddr, iph->saddr);
1da177e4
LT
479 if (!req)
480 goto out;
481
482 /* ICMPs are not backlogged, hence we cannot get
483 an established socket here.
484 */
547b792c 485 WARN_ON(req->sk);
1da177e4 486
2e6599cb 487 if (seq != tcp_rsk(req)->snt_isn) {
de0744af 488 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
1da177e4
LT
489 goto out;
490 }
491
492 /*
493 * Still in SYN_RECV, just remove it silently.
494 * There is no good way to pass the error to the newly
495 * created socket, and POSIX does not want network
496 * errors returned from accept().
497 */
463c84b9 498 inet_csk_reqsk_queue_drop(sk, req, prev);
848bf15f 499 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1da177e4
LT
500 goto out;
501
502 case TCP_SYN_SENT:
503 case TCP_SYN_RECV: /* Cannot happen.
168a8f58
JC
504 It can f.e. if SYNs crossed,
505 or Fast Open.
1da177e4
LT
506 */
507 if (!sock_owned_by_user(sk)) {
1da177e4
LT
508 sk->sk_err = err;
509
510 sk->sk_error_report(sk);
511
512 tcp_done(sk);
513 } else {
514 sk->sk_err_soft = err;
515 }
516 goto out;
517 }
518
519 /* If we've already connected we will keep trying
520 * until we time out, or the user gives up.
521 *
522 * rfc1122 4.2.3.9 allows to consider as hard errors
523 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
524 * but it is obsoleted by pmtu discovery).
525 *
526 * Note, that in modern internet, where routing is unreliable
527 * and in each dark corner broken firewalls sit, sending random
528 * errors ordered by their masters even this two messages finally lose
529 * their original sense (even Linux sends invalid PORT_UNREACHs)
530 *
531 * Now we are in compliance with RFCs.
532 * --ANK (980905)
533 */
534
535 inet = inet_sk(sk);
536 if (!sock_owned_by_user(sk) && inet->recverr) {
537 sk->sk_err = err;
538 sk->sk_error_report(sk);
539 } else { /* Only an error on timeout */
540 sk->sk_err_soft = err;
541 }
542
543out:
544 bh_unlock_sock(sk);
545 sock_put(sk);
546}
547
419f9f89
HX
548static void __tcp_v4_send_check(struct sk_buff *skb,
549 __be32 saddr, __be32 daddr)
1da177e4 550{
aa8223c7 551 struct tcphdr *th = tcp_hdr(skb);
1da177e4 552
84fa7933 553 if (skb->ip_summed == CHECKSUM_PARTIAL) {
419f9f89 554 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
663ead3b 555 skb->csum_start = skb_transport_header(skb) - skb->head;
ff1dcadb 556 skb->csum_offset = offsetof(struct tcphdr, check);
1da177e4 557 } else {
419f9f89 558 th->check = tcp_v4_check(skb->len, saddr, daddr,
07f0757a 559 csum_partial(th,
1da177e4
LT
560 th->doff << 2,
561 skb->csum));
562 }
563}
564
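/* With CHECKSUM_PARTIAL the device completes the checksum, so only the
 * pseudo-header sum is seeded above and csum_start/csum_offset tell the
 * hardware where to write the result; otherwise the full checksum over
 * header and payload is computed in software.
 */
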
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *	So that we build reply only basing on parameters
 *	arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest = th->source;
	rep.th.source = th->dest;
	rep.th.doff = sizeof(struct tcphdr) / 4;
	rep.th.rst = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We do not loosen security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping,
			      bool nocache)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp, NULL);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
		if (!tcp_rsk(req)->snt_synack && !err)
			tcp_rsk(req)->snt_synack = tcp_time_stamp;
	}

	return err;
}

static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);

	if (!res)
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return res;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
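/* Whether cookies are sent is governed by the tcp_syncookies sysctl
 * consulted above; an operator enables it with (illustrative):
 *	echo 1 > /proc/sys/net/ipv4/tcp_syncookies
 */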

/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

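/* The options saved here are stored in ireq->opt by
 * tcp_v4_conn_request() and later attached to the child socket's
 * inet_opt in tcp_v4_syn_recv_sock().
 */
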
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	/* Note: addr is already a union tcp_md5_addr pointer; passing
	 * &addr (the address of the pointer itself) here was a bug.
	 */
	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	if (!hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}

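/* tcp_v4_parse_md5_keys() above backs the TCP_MD5SIG socket option.
 * A minimal userspace sketch (illustrative only; fd is assumed to be a
 * TCP socket, error handling omitted):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.2", &sin->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */
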
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
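/* Both hashing helpers above borrow a per-CPU entry via
 * tcp_get_md5sig_pool() and must release it with tcp_put_md5sig_pool()
 * on every path, including the clear_hash error gotos.
 */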

static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "");
		return true;
	}
	return false;
}

#endif

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif

static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
			       struct request_sock *req,
			       struct tcp_fastopen_cookie *foc,
			       struct tcp_fastopen_cookie *valid_foc)
{
	bool skip_cookie = false;
	struct fastopen_queue *fastopenq;

	if (likely(!fastopen_cookie_present(foc))) {
		/* See include/net/tcp.h for the meaning of these knobs */
		if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
		    ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
			skip_cookie = true; /* no cookie to validate */
		else
			return false;
	}
	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
	/* A FO option is present; bump the counter. */
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
	    fastopenq == NULL || fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;
		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
			spin_unlock(&fastopenq->lock);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			/* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
			foc->len = -1;
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_free(req1);
	}
	if (skip_cookie) {
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	}
	if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
		if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
			tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
			if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
			    memcmp(&foc->val[0], &valid_foc->val[0],
				   TCP_FASTOPEN_COOKIE_SIZE) != 0)
				return false;
			valid_foc->len = -1;
		}
		/* Acknowledge the data received from the peer. */
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	} else if (foc->len == 0) { /* Client requesting a cookie */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
	} else {
		/* Client sent a cookie with wrong size. Treat it
		 * the same as invalid and return a valid one.
		 */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
	}
	return false;
}

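/* The TFO_SERVER_* bits tested above live in sysctl_tcp_fastopen
 * (see include/net/tcp.h); e.g. server-side Fast Open is enabled with
 * (illustrative): echo 2 > /proc/sys/net/ipv4/tcp_fastopen
 */
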
static int tcp_v4_conn_req_fastopen(struct sock *sk,
				    struct sk_buff *skb,
				    struct sk_buff *skb_synack,
				    struct request_sock *req,
				    struct request_values *rvp)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct sock *child;
	int err;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL) {
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		kfree_skb(skb_synack);
		return -1;
	}
	err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
				    ireq->rmt_addr, ireq->opt);
	err = net_xmit_eval(err);
	if (!err)
		tcp_rsk(req)->snt_synack = tcp_time_stamp;
	/* XXX (TFO) - is it ok to ignore error and continue? */

	spin_lock(&queue->fastopenq->lock);
	queue->fastopenq->qlen++;
	spin_unlock(&queue->fastopenq->lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	/* Do a hold on the listener sk so that if the listener is being
	 * closed, the child that has been accepted can live on and still
	 * access listen_lock.
	 */
	sock_hold(sk);
	tcp_rsk(req)->listener = sk;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the SYN table of the parent
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	/* Add the child socket directly into the accept queue */
	inet_csk_reqsk_queue_add(sk, req, child);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_buffer_space(child);
	tcp_init_metrics(child);

	/* Queue the data carried in the SYN packet. We need to first
	 * bump skb's refcnt because the caller will attempt to free it.
	 *
	 * XXX (TFO) - we honor a zero-payload TFO request for now.
	 * (Any reason not to?)
	 */
	if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
		/* Don't queue the skb if there is no payload in SYN.
		 * XXX (TFO) - How about SYN+FIN?
		 */
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	} else {
		skb = skb_get(skb);
		skb_dst_drop(skb);
		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
		skb_set_owner_r(skb, child);
		__skb_queue_tail(&child->sk_receive_queue, skb);
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		tp->syn_data_acked = 1;
	}
	sk->sk_data_ready(sk, 0);
	bh_unlock_sock(child);
	sock_put(child);
	WARN_ON(req->sk == NULL);
	return 0;
}

1da177e4
LT
1468int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1469{
4957faad 1470 struct tcp_extend_values tmp_ext;
1da177e4 1471 struct tcp_options_received tmp_opt;
cf533ea5 1472 const u8 *hash_location;
60236fdd 1473 struct request_sock *req;
e6b4d113 1474 struct inet_request_sock *ireq;
4957faad 1475 struct tcp_sock *tp = tcp_sk(sk);
e6b4d113 1476 struct dst_entry *dst = NULL;
eddc9ec5
ACM
1477 __be32 saddr = ip_hdr(skb)->saddr;
1478 __be32 daddr = ip_hdr(skb)->daddr;
1da177e4 1479 __u32 isn = TCP_SKB_CB(skb)->when;
a2a385d6 1480 bool want_cookie = false;
168a8f58
JC
1481 struct flowi4 fl4;
1482 struct tcp_fastopen_cookie foc = { .len = -1 };
1483 struct tcp_fastopen_cookie valid_foc = { .len = -1 };
1484 struct sk_buff *skb_synack;
1485 int do_fastopen;
1da177e4
LT
1486
1487 /* Never answer to SYNs send to broadcast or multicast */
511c3f92 1488 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1da177e4
LT
1489 goto drop;
1490
1491 /* TW buckets are converted to open requests without
1492 * limitations, they conserve resources and peer is
1493 * evidently real one.
1494 */
463c84b9 1495 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
946cedcc
ED
1496 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1497 if (!want_cookie)
1498 goto drop;
1da177e4
LT
1499 }
1500
1501 /* Accept backlog is full. If we have already queued enough
1502 * of warm entries in syn queue, drop request. It is better than
1503 * clogging syn queue with openreqs with exponentially increasing
1504 * timeout.
1505 */
2aeef18d
NS
1506 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
1507 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1da177e4 1508 goto drop;
2aeef18d 1509 }
1da177e4 1510
ce4a7d0d 1511 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1da177e4
LT
1512 if (!req)
1513 goto drop;
1514
cfb6eeb4
YH
1515#ifdef CONFIG_TCP_MD5SIG
1516 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1517#endif
1518
1da177e4 1519 tcp_clear_options(&tmp_opt);
bee7ca9e 1520 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
4957faad 1521 tmp_opt.user_mss = tp->rx_opt.user_mss;
168a8f58
JC
1522 tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
1523 want_cookie ? NULL : &foc);
4957faad
WAS
1524
1525 if (tmp_opt.cookie_plus > 0 &&
1526 tmp_opt.saw_tstamp &&
1527 !tp->rx_opt.cookie_out_never &&
1528 (sysctl_tcp_cookie_size > 0 ||
1529 (tp->cookie_values != NULL &&
1530 tp->cookie_values->cookie_desired > 0))) {
1531 u8 *c;
1532 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1533 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1534
1535 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1536 goto drop_and_release;
1537
1538 /* Secret recipe starts with IP addresses */
0eae88f3
ED
1539 *mess++ ^= (__force u32)daddr;
1540 *mess++ ^= (__force u32)saddr;
1da177e4 1541
4957faad
WAS
1542 /* plus variable length Initiator Cookie */
1543 c = (u8 *)mess;
1544 while (l-- > 0)
1545 *c++ ^= *hash_location++;
1546
a2a385d6 1547 want_cookie = false; /* not our kind of cookie */
4957faad
WAS
1548 tmp_ext.cookie_out_never = 0; /* false */
1549 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1550 } else if (!tp->rx_opt.cookie_in_always) {
1551 /* redundant indications, but ensure initialization. */
1552 tmp_ext.cookie_out_never = 1; /* true */
1553 tmp_ext.cookie_plus = 0;
1554 } else {
1555 goto drop_and_release;
1556 }
1557 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1da177e4 1558
4dfc2817 1559 if (want_cookie && !tmp_opt.saw_tstamp)
1da177e4 1560 tcp_clear_options(&tmp_opt);
1da177e4 1561
1da177e4 1562 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1da177e4
LT
1563 tcp_openreq_init(req, &tmp_opt, skb);
1564
bb5b7c11
DM
1565 ireq = inet_rsk(req);
1566 ireq->loc_addr = daddr;
1567 ireq->rmt_addr = saddr;
1568 ireq->no_srccheck = inet_sk(sk)->transparent;
5dff747b 1569 ireq->opt = tcp_v4_save_options(skb);
bb5b7c11 1570
284904aa 1571 if (security_inet_conn_request(sk, skb, req))
bb5b7c11 1572 goto drop_and_free;
284904aa 1573
172d69e6 1574 if (!want_cookie || tmp_opt.tstamp_ok)
5d134f1c 1575 TCP_ECN_create_request(req, skb, sock_net(sk));
1da177e4
LT
1576
1577 if (want_cookie) {
1da177e4 1578 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
172d69e6 1579 req->cookie_ts = tmp_opt.tstamp_ok;
1da177e4 1580 } else if (!isn) {
1da177e4
LT
1581 /* VJ's idea. We save last timestamp seen
1582 * from the destination in peer table, when entering
1583 * state TIME-WAIT, and check against it before
1584 * accepting new connection request.
1585 *
1586 * If "isn" is not zero, this request hit alive
1587 * timewait bucket, so that all the necessary checks
1588 * are made in the function processing timewait state.
1589 */
1590 if (tmp_opt.saw_tstamp &&
295ff7ed 1591 tcp_death_row.sysctl_tw_recycle &&
ba3f7f04 1592 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
81166dd6
DM
1593 fl4.daddr == saddr) {
1594 if (!tcp_peer_is_proven(req, dst, true)) {
de0744af 1595 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
7cd04fa7 1596 goto drop_and_release;
1da177e4
LT
1597 }
1598 }
1599 /* Kill the following clause, if you dislike this way. */
1600 else if (!sysctl_tcp_syncookies &&
463c84b9 1601 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1da177e4 1602 (sysctl_max_syn_backlog >> 2)) &&
81166dd6 1603 !tcp_peer_is_proven(req, dst, false)) {
1da177e4
LT
1604 /* Without syncookies last quarter of
1605 * backlog is filled with destinations,
1606 * proven to be alive.
1607 * It means that we continue to communicate
1608 * to destinations, already remembered
1609 * to the moment of synflood.
1610 */
afd46503 1611 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
673d57e7 1612 &saddr, ntohs(tcp_hdr(skb)->source));
7cd04fa7 1613 goto drop_and_release;
1da177e4
LT
1614 }
1615
a94f723d 1616 isn = tcp_v4_init_sequence(skb);
1da177e4 1617 }
2e6599cb 1618 tcp_rsk(req)->snt_isn = isn;
1da177e4 1619
168a8f58
JC
1620 if (dst == NULL) {
1621 dst = inet_csk_route_req(sk, &fl4, req);
1622 if (dst == NULL)
1623 goto drop_and_free;
1624 }
1625 do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1626
1627 /* We don't call tcp_v4_send_synack() directly because we need
1628 * to make sure a child socket can be created successfully before
1629 * sending back synack!
1630 *
1631 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1632 * (or better yet, call tcp_send_synack() in the child context
1633 * directly, but will have to fix bunch of other code first)
1634 * after syn_recv_sock() except one will need to first fix the
1635 * latter to remove its dependency on the current implementation
1636 * of tcp_v4_send_synack()->tcp_select_initial_window().
1637 */
1638 skb_synack = tcp_make_synack(sk, dst, req,
1639 (struct request_values *)&tmp_ext,
1640 fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1641
1642 if (skb_synack) {
1643 __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
1644 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1645 } else
1646 goto drop_and_free;
1647
1648 if (likely(!do_fastopen)) {
1649 int err;
1650 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1651 ireq->rmt_addr, ireq->opt);
1652 err = net_xmit_eval(err);
1653 if (err || want_cookie)
1654 goto drop_and_free;
1655
016818d0 1656 tcp_rsk(req)->snt_synack = tcp_time_stamp;
168a8f58
JC
1657 tcp_rsk(req)->listener = NULL;
1658 /* Add the request_sock to the SYN table */
1659 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1660 if (fastopen_cookie_present(&foc) && foc.len != 0)
1661 NET_INC_STATS_BH(sock_net(sk),
1662 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1663 } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
1664 (struct request_values *)&tmp_ext))
1da177e4
LT
1665 goto drop_and_free;
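	/* Editor's sketch of the ordering above (interpretation, not in
	 * the original source): the SYN-ACK is built and checksummed
	 * first, but for a fastopen request it is handed to
	 * tcp_v4_conn_req_fastopen(), which creates the child socket
	 * before transmitting it; a failure to create the child
	 * therefore never leaves a stray SYN-ACK on the wire.
	 */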
1666
1da177e4
LT
1667 return 0;
1668
7cd04fa7
DL
1669drop_and_release:
1670 dst_release(dst);
1da177e4 1671drop_and_free:
60236fdd 1672 reqsk_free(req);
1da177e4 1673drop:
848bf15f 1674 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1da177e4
LT
1675 return 0;
1676}
4bc2f18b 1677EXPORT_SYMBOL(tcp_v4_conn_request);
1da177e4
LT
1678
1679
1680/*
1681 * The three-way handshake has completed - we got a valid ACK -
1682 * now create the new socket.
1683 */
1684struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
60236fdd 1685 struct request_sock *req,
1da177e4
LT
1686 struct dst_entry *dst)
1687{
2e6599cb 1688 struct inet_request_sock *ireq;
1da177e4
LT
1689 struct inet_sock *newinet;
1690 struct tcp_sock *newtp;
1691 struct sock *newsk;
cfb6eeb4
YH
1692#ifdef CONFIG_TCP_MD5SIG
1693 struct tcp_md5sig_key *key;
1694#endif
f6d8bd05 1695 struct ip_options_rcu *inet_opt;
1da177e4
LT
1696
1697 if (sk_acceptq_is_full(sk))
1698 goto exit_overflow;
1699
1da177e4
LT
1700 newsk = tcp_create_openreq_child(sk, req, skb);
1701 if (!newsk)
093d2823 1702 goto exit_nonewsk;
1da177e4 1703
bcd76111 1704 newsk->sk_gso_type = SKB_GSO_TCPV4;
fae6ef87 1705 inet_sk_rx_dst_set(newsk, skb);
1da177e4
LT
1706
1707 newtp = tcp_sk(newsk);
1708 newinet = inet_sk(newsk);
2e6599cb 1709 ireq = inet_rsk(req);
c720c7e8
ED
1710 newinet->inet_daddr = ireq->rmt_addr;
1711 newinet->inet_rcv_saddr = ireq->loc_addr;
1712 newinet->inet_saddr = ireq->loc_addr;
f6d8bd05
ED
1713 inet_opt = ireq->opt;
1714 rcu_assign_pointer(newinet->inet_opt, inet_opt);
2e6599cb 1715 ireq->opt = NULL;
463c84b9 1716 newinet->mc_index = inet_iif(skb);
eddc9ec5 1717 newinet->mc_ttl = ip_hdr(skb)->ttl;
4c507d28 1718 newinet->rcv_tos = ip_hdr(skb)->tos;
d83d8461 1719 inet_csk(newsk)->icsk_ext_hdr_len = 0;
f6d8bd05
ED
1720 if (inet_opt)
1721 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
c720c7e8 1722 newinet->inet_id = newtp->write_seq ^ jiffies;
1da177e4 1723
dfd25fff
ED
1724 if (!dst) {
1725 dst = inet_csk_route_child_sock(sk, newsk, req);
1726 if (!dst)
1727 goto put_and_exit;
1728 } else {
1729 /* syncookie case : see end of cookie_v4_check() */
1730 }
0e734419
DM
1731 sk_setup_caps(newsk, dst);
1732
5d424d5a 1733 tcp_mtup_init(newsk);
1da177e4 1734 tcp_sync_mss(newsk, dst_mtu(dst));
0dbaee3b 1735 newtp->advmss = dst_metric_advmss(dst);
f5fff5dc
TQ
1736 if (tcp_sk(sk)->rx_opt.user_mss &&
1737 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1738 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1739
1da177e4 1740 tcp_initialize_rcv_mss(newsk);
623df484 1741 tcp_synack_rtt_meas(newsk, req);
e6c022a4 1742 newtp->total_retrans = req->num_retrans;
1da177e4 1743
cfb6eeb4
YH
1744#ifdef CONFIG_TCP_MD5SIG
1745 /* Copy over the MD5 key from the original socket */
a915da9b
ED
1746 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1747 AF_INET);
c720c7e8 1748 if (key != NULL) {
cfb6eeb4
YH
1749 /*
1750 * We're using one, so create a matching key
1751 * on the newsk structure. If we fail to get
1752 * memory, then we end up not copying the key
1753 * across. Shucks.
1754 */
a915da9b
ED
1755 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1756 AF_INET, key->key, key->keylen, GFP_ATOMIC);
a465419b 1757 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
cfb6eeb4
YH
1758 }
1759#endif
1760
0e734419
DM
1761 if (__inet_inherit_port(sk, newsk) < 0)
1762 goto put_and_exit;
9327f705 1763 __inet_hash_nolisten(newsk, NULL);
1da177e4
LT
1764
1765 return newsk;
1766
1767exit_overflow:
de0744af 1768 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
093d2823
BS
1769exit_nonewsk:
1770 dst_release(dst);
1da177e4 1771exit:
de0744af 1772 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1da177e4 1773 return NULL;
0e734419 1774put_and_exit:
e337e24d
CP
1775 inet_csk_prepare_forced_close(newsk);
1776 tcp_done(newsk);
0e734419 1777 goto exit;
1da177e4 1778}
4bc2f18b 1779EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1da177e4
LT
1780
1781static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1782{
aa8223c7 1783 struct tcphdr *th = tcp_hdr(skb);
eddc9ec5 1784 const struct iphdr *iph = ip_hdr(skb);
1da177e4 1785 struct sock *nsk;
60236fdd 1786 struct request_sock **prev;
1da177e4 1787 /* Find possible connection requests. */
463c84b9
ACM
1788 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1789 iph->saddr, iph->daddr);
1da177e4 1790 if (req)
8336886f 1791 return tcp_check_req(sk, skb, req, prev, false);
1da177e4 1792
3b1e0a65 1793 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
c67499c0 1794 th->source, iph->daddr, th->dest, inet_iif(skb));
1da177e4
LT
1795
1796 if (nsk) {
1797 if (nsk->sk_state != TCP_TIME_WAIT) {
1798 bh_lock_sock(nsk);
1799 return nsk;
1800 }
9469c7b4 1801 inet_twsk_put(inet_twsk(nsk));
1da177e4
LT
1802 return NULL;
1803 }
1804
1805#ifdef CONFIG_SYN_COOKIES
af9b4738 1806 if (!th->syn)
1da177e4
LT
1807 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1808#endif
1809 return sk;
1810}
1811
b51655b9 1812static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1da177e4 1813{
eddc9ec5
ACM
1814 const struct iphdr *iph = ip_hdr(skb);
1815
84fa7933 1816 if (skb->ip_summed == CHECKSUM_COMPLETE) {
eddc9ec5
ACM
1817 if (!tcp_v4_check(skb->len, iph->saddr,
1818 iph->daddr, skb->csum)) {
fb286bb2 1819 skb->ip_summed = CHECKSUM_UNNECESSARY;
1da177e4 1820 return 0;
fb286bb2 1821 }
1da177e4 1822 }
fb286bb2 1823
eddc9ec5 1824 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
fb286bb2
HX
1825 skb->len, IPPROTO_TCP, 0);
1826
1da177e4 1827 if (skb->len <= 76) {
fb286bb2 1828 return __skb_checksum_complete(skb);
1da177e4
LT
1829 }
1830 return 0;
1831}
1832
1833
1834 /* The socket must have its spinlock held when we get
1835 * here.
1836 *
1837 * We have a potential double-lock case here, so even when
1838 * doing backlog processing we use the BH locking scheme.
1839 * This is because we cannot sleep with the original spinlock
1840 * held.
1841 */
1842int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1843{
cfb6eeb4
YH
1844 struct sock *rsk;
1845#ifdef CONFIG_TCP_MD5SIG
1846 /*
1847 * We really want to reject the packet as early as possible
1848 * if:
1849 * o We're expecting an MD5'd packet and there is no MD5 TCP option
1850 * o There is an MD5 option and we're not expecting one
1851 */
7174259e 1852 if (tcp_v4_inbound_md5_hash(sk, skb))
cfb6eeb4
YH
1853 goto discard;
1854#endif
1855
1da177e4 1856 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
404e0a8b
ED
1857 struct dst_entry *dst = sk->sk_rx_dst;
1858
bdeab991 1859 sock_rps_save_rxhash(sk, skb);
404e0a8b 1860 if (dst) {
505fbcf0
ED
1861 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1862 dst->ops->check(dst, 0) == NULL) {
92101b3b
DM
1863 dst_release(dst);
1864 sk->sk_rx_dst = NULL;
1865 }
1866 }
aa8223c7 1867 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
cfb6eeb4 1868 rsk = sk;
1da177e4 1869 goto reset;
cfb6eeb4 1870 }
1da177e4
LT
1871 return 0;
1872 }
1873
ab6a5bb6 1874 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1da177e4
LT
1875 goto csum_err;
1876
1877 if (sk->sk_state == TCP_LISTEN) {
1878 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1879 if (!nsk)
1880 goto discard;
1881
1882 if (nsk != sk) {
bdeab991 1883 sock_rps_save_rxhash(nsk, skb);
cfb6eeb4
YH
1884 if (tcp_child_process(sk, nsk, skb)) {
1885 rsk = nsk;
1da177e4 1886 goto reset;
cfb6eeb4 1887 }
1da177e4
LT
1888 return 0;
1889 }
ca55158c 1890 } else
bdeab991 1891 sock_rps_save_rxhash(sk, skb);
ca55158c 1892
aa8223c7 1893 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
cfb6eeb4 1894 rsk = sk;
1da177e4 1895 goto reset;
cfb6eeb4 1896 }
1da177e4
LT
1897 return 0;
1898
1899reset:
cfb6eeb4 1900 tcp_v4_send_reset(rsk, skb);
1da177e4
LT
1901discard:
1902 kfree_skb(skb);
1903 /* Be careful here. If this function gets more complicated and
1904 * gcc suffers from register pressure on the x86, sk (in %ebx)
1905 * might be destroyed here. This current version compiles correctly,
1906 * but you have been warned.
1907 */
1908 return 0;
1909
1910csum_err:
63231bdd 1911 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1da177e4
LT
1912 goto discard;
1913}
4bc2f18b 1914EXPORT_SYMBOL(tcp_v4_do_rcv);
1da177e4 1915
160eb5a6 1916void tcp_v4_early_demux(struct sk_buff *skb)
41063e9d 1917{
41063e9d
DM
1918 const struct iphdr *iph;
1919 const struct tcphdr *th;
1920 struct sock *sk;
41063e9d 1921
41063e9d 1922 if (skb->pkt_type != PACKET_HOST)
160eb5a6 1923 return;
41063e9d 1924
45f00f99 1925 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
160eb5a6 1926 return;
41063e9d
DM
1927
1928 iph = ip_hdr(skb);
45f00f99 1929 th = tcp_hdr(skb);
41063e9d
DM
1930
1931 if (th->doff < sizeof(struct tcphdr) / 4)
160eb5a6 1932 return;
41063e9d 1933
45f00f99 1934 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
41063e9d 1935 iph->saddr, th->source,
7011d085 1936 iph->daddr, ntohs(th->dest),
9cb429d6 1937 skb->skb_iif);
41063e9d
DM
1938 if (sk) {
1939 skb->sk = sk;
1940 skb->destructor = sock_edemux;
1941 if (sk->sk_state != TCP_TIME_WAIT) {
1942 struct dst_entry *dst = sk->sk_rx_dst;
505fbcf0 1943
41063e9d
DM
1944 if (dst)
1945 dst = dst_check(dst, 0);
92101b3b 1946 if (dst &&
505fbcf0 1947 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
92101b3b 1948 skb_dst_set_noref(skb, dst);
41063e9d
DM
1949 }
1950 }
41063e9d
DM
1951}
1952
b2fb4f54
ED
1953/* The packet is added to the VJ-style prequeue for processing in process
1954 * context, if a reader task is waiting. Apparently, this exciting
1955 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1956 * failed somewhere. Latency? Burstiness? Well, at least now we will
1957 * see why it failed. 8)8) --ANK
1958 *
1959 */
1960bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1961{
1962 struct tcp_sock *tp = tcp_sk(sk);
1963
1964 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1965 return false;
1966
1967 if (skb->len <= tcp_hdrlen(skb) &&
1968 skb_queue_len(&tp->ucopy.prequeue) == 0)
1969 return false;
1970
1971 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1972 tp->ucopy.memory += skb->truesize;
1973 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1974 struct sk_buff *skb1;
1975
1976 BUG_ON(sock_owned_by_user(sk));
1977
1978 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1979 sk_backlog_rcv(sk, skb1);
1980 NET_INC_STATS_BH(sock_net(sk),
1981 LINUX_MIB_TCPPREQUEUEDROPPED);
1982 }
1983
1984 tp->ucopy.memory = 0;
1985 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1986 wake_up_interruptible_sync_poll(sk_sleep(sk),
1987 POLLIN | POLLRDNORM | POLLRDBAND);
1988 if (!inet_csk_ack_scheduled(sk))
1989 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1990 (3 * tcp_rto_min(sk)) / 4,
1991 TCP_RTO_MAX);
1992 }
1993 return true;
1994}
1995EXPORT_SYMBOL(tcp_prequeue);
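/* Editor's note (sketch, not in the original file): when the first skb
 * lands on an empty prequeue, the reader is woken and a delayed-ACK
 * timer is armed at 3/4 of the minimum RTO as a safety net. Assuming
 * the common tcp_rto_min() of 200 ms, that is a 150 ms deadline: if
 * the reader has not drained the prequeue and ACKed by then, the
 * timer fires and sends the ACK itself.
 */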
1996
1da177e4
LT
1997/*
1998 * From tcp_input.c
1999 */
2000
2001int tcp_v4_rcv(struct sk_buff *skb)
2002{
eddc9ec5 2003 const struct iphdr *iph;
cf533ea5 2004 const struct tcphdr *th;
1da177e4
LT
2005 struct sock *sk;
2006 int ret;
a86b1e30 2007 struct net *net = dev_net(skb->dev);
1da177e4
LT
2008
2009 if (skb->pkt_type != PACKET_HOST)
2010 goto discard_it;
2011
2012 /* Count it even if it's bad */
63231bdd 2013 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
2014
2015 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
2016 goto discard_it;
2017
aa8223c7 2018 th = tcp_hdr(skb);
1da177e4
LT
2019
2020 if (th->doff < sizeof(struct tcphdr) / 4)
2021 goto bad_packet;
2022 if (!pskb_may_pull(skb, th->doff * 4))
2023 goto discard_it;
2024
2025 /* An explanation is required here, I think.
2026 * Packet length and doff are validated by header prediction,
caa20d9a 2027 * provided the case of th->doff==0 is eliminated.
1da177e4 2028 * So, we defer the checks. */
60476372 2029 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1da177e4
LT
2030 goto bad_packet;
2031
aa8223c7 2032 th = tcp_hdr(skb);
eddc9ec5 2033 iph = ip_hdr(skb);
1da177e4
LT
2034 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
2035 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
2036 skb->len - th->doff * 4);
2037 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
2038 TCP_SKB_CB(skb)->when = 0;
b82d1bb4 2039 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1da177e4
LT
2040 TCP_SKB_CB(skb)->sacked = 0;
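	/* Worked example (editor's addition): end_seq counts SYN and FIN
	 * as one sequence number each. For a segment with seq = 1000,
	 * a 20-byte header (doff = 5), 500 payload bytes and the FIN
	 * bit set: end_seq = 1000 + 0 + 1 + (520 - 20) = 1501.
	 */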
2041
9a1f27c4 2042 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
2043 if (!sk)
2044 goto no_tcp_socket;
2045
bb134d5d
ED
2046process:
2047 if (sk->sk_state == TCP_TIME_WAIT)
2048 goto do_time_wait;
2049
6cce09f8
ED
2050 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
2051 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
d218d111 2052 goto discard_and_relse;
6cce09f8 2053 }
d218d111 2054
1da177e4
LT
2055 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2056 goto discard_and_relse;
b59c2701 2057 nf_reset(skb);
1da177e4 2058
fda9ef5d 2059 if (sk_filter(sk, skb))
1da177e4
LT
2060 goto discard_and_relse;
2061
2062 skb->dev = NULL;
2063
c6366184 2064 bh_lock_sock_nested(sk);
1da177e4
LT
2065 ret = 0;
2066 if (!sock_owned_by_user(sk)) {
1a2449a8
CL
2067#ifdef CONFIG_NET_DMA
2068 struct tcp_sock *tp = tcp_sk(sk);
2069 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
a2bd1140 2070 tp->ucopy.dma_chan = net_dma_find_channel();
1a2449a8 2071 if (tp->ucopy.dma_chan)
1da177e4 2072 ret = tcp_v4_do_rcv(sk, skb);
1a2449a8
CL
2073 else
2074#endif
2075 {
2076 if (!tcp_prequeue(sk, skb))
ae8d7f88 2077 ret = tcp_v4_do_rcv(sk, skb);
1a2449a8 2078 }
da882c1f
ED
2079 } else if (unlikely(sk_add_backlog(sk, skb,
2080 sk->sk_rcvbuf + sk->sk_sndbuf))) {
6b03a53a 2081 bh_unlock_sock(sk);
6cce09f8 2082 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
6b03a53a
ZY
2083 goto discard_and_relse;
2084 }
1da177e4
LT
2085 bh_unlock_sock(sk);
2086
2087 sock_put(sk);
2088
2089 return ret;
2090
2091no_tcp_socket:
2092 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2093 goto discard_it;
2094
2095 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2096bad_packet:
63231bdd 2097 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 2098 } else {
cfb6eeb4 2099 tcp_v4_send_reset(NULL, skb);
1da177e4
LT
2100 }
2101
2102discard_it:
2103 /* Discard frame. */
2104 kfree_skb(skb);
e905a9ed 2105 return 0;
1da177e4
LT
2106
2107discard_and_relse:
2108 sock_put(sk);
2109 goto discard_it;
2110
2111do_time_wait:
2112 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 2113 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
2114 goto discard_it;
2115 }
2116
2117 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
63231bdd 2118 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
9469c7b4 2119 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
2120 goto discard_it;
2121 }
9469c7b4 2122 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4 2123 case TCP_TW_SYN: {
c346dca1 2124 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
c67499c0 2125 &tcp_hashinfo,
da5e3630 2126 iph->saddr, th->source,
eddc9ec5 2127 iph->daddr, th->dest,
463c84b9 2128 inet_iif(skb));
1da177e4 2129 if (sk2) {
9469c7b4
YH
2130 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
2131 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
2132 sk = sk2;
2133 goto process;
2134 }
2135 /* Fall through to ACK */
2136 }
2137 case TCP_TW_ACK:
2138 tcp_v4_timewait_ack(sk, skb);
2139 break;
2140 case TCP_TW_RST:
2141 goto no_tcp_socket;
2142 case TCP_TW_SUCCESS:;
2143 }
2144 goto discard_it;
2145}
2146
ccb7c410
DM
2147static struct timewait_sock_ops tcp_timewait_sock_ops = {
2148 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2149 .twsk_unique = tcp_twsk_unique,
2150 .twsk_destructor= tcp_twsk_destructor,
ccb7c410 2151};
1da177e4 2152
63d02d15 2153void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
5d299f3d
ED
2154{
2155 struct dst_entry *dst = skb_dst(skb);
2156
2157 dst_hold(dst);
2158 sk->sk_rx_dst = dst;
2159 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2160}
63d02d15 2161EXPORT_SYMBOL(inet_sk_rx_dst_set);
5d299f3d 2162
3b401a81 2163const struct inet_connection_sock_af_ops ipv4_specific = {
543d9cfe
ACM
2164 .queue_xmit = ip_queue_xmit,
2165 .send_check = tcp_v4_send_check,
2166 .rebuild_header = inet_sk_rebuild_header,
5d299f3d 2167 .sk_rx_dst_set = inet_sk_rx_dst_set,
543d9cfe
ACM
2168 .conn_request = tcp_v4_conn_request,
2169 .syn_recv_sock = tcp_v4_syn_recv_sock,
543d9cfe
ACM
2170 .net_header_len = sizeof(struct iphdr),
2171 .setsockopt = ip_setsockopt,
2172 .getsockopt = ip_getsockopt,
2173 .addr2sockaddr = inet_csk_addr2sockaddr,
2174 .sockaddr_len = sizeof(struct sockaddr_in),
ab1e0a13 2175 .bind_conflict = inet_csk_bind_conflict,
3fdadf7d 2176#ifdef CONFIG_COMPAT
543d9cfe
ACM
2177 .compat_setsockopt = compat_ip_setsockopt,
2178 .compat_getsockopt = compat_ip_getsockopt,
3fdadf7d 2179#endif
1da177e4 2180};
4bc2f18b 2181EXPORT_SYMBOL(ipv4_specific);
1da177e4 2182
cfb6eeb4 2183#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 2184static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
cfb6eeb4 2185 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 2186 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4 2187 .md5_parse = tcp_v4_parse_md5_keys,
cfb6eeb4 2188};
b6332e6c 2189#endif
cfb6eeb4 2190
1da177e4
LT
2191/* NOTE: A lot of things are set to zero explicitly by the call to
2192 * sk_alloc(), so they need not be done here.
2193 */
2194static int tcp_v4_init_sock(struct sock *sk)
2195{
6687e988 2196 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 2197
900f65d3 2198 tcp_init_sock(sk);
1da177e4 2199
8292a17a 2200 icsk->icsk_af_ops = &ipv4_specific;
900f65d3 2201
cfb6eeb4 2202#ifdef CONFIG_TCP_MD5SIG
ac807fa8 2203 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
cfb6eeb4 2204#endif
1da177e4 2205
1da177e4
LT
2206 return 0;
2207}
2208
7d06b2e0 2209void tcp_v4_destroy_sock(struct sock *sk)
1da177e4
LT
2210{
2211 struct tcp_sock *tp = tcp_sk(sk);
2212
2213 tcp_clear_xmit_timers(sk);
2214
6687e988 2215 tcp_cleanup_congestion_control(sk);
317a76f9 2216
1da177e4 2217 /* Clean up the write buffer. */
fe067e8a 2218 tcp_write_queue_purge(sk);
1da177e4
LT
2219
2220 /* Cleans up our, hopefully empty, out_of_order_queue. */
e905a9ed 2221 __skb_queue_purge(&tp->out_of_order_queue);
1da177e4 2222
cfb6eeb4
YH
2223#ifdef CONFIG_TCP_MD5SIG
2224 /* Clean up the MD5 key list, if any */
2225 if (tp->md5sig_info) {
a915da9b 2226 tcp_clear_md5_list(sk);
a8afca03 2227 kfree_rcu(tp->md5sig_info, rcu);
cfb6eeb4
YH
2228 tp->md5sig_info = NULL;
2229 }
2230#endif
2231
1a2449a8
CL
2232#ifdef CONFIG_NET_DMA
2233 /* Cleans up our sk_async_wait_queue */
e905a9ed 2234 __skb_queue_purge(&sk->sk_async_wait_queue);
1a2449a8
CL
2235#endif
2236
1da177e4
LT
2237 /* Clean the prequeue; it really must be empty */
2238 __skb_queue_purge(&tp->ucopy.prequeue);
2239
2240 /* Clean up a referenced TCP bind bucket. */
463c84b9 2241 if (inet_csk(sk)->icsk_bind_hash)
ab1e0a13 2242 inet_put_port(sk);
1da177e4 2243
435cf559
WAS
2244 /* TCP Cookie Transactions */
2245 if (tp->cookie_values != NULL) {
2246 kref_put(&tp->cookie_values->kref,
2247 tcp_cookie_values_release);
2248 tp->cookie_values = NULL;
2249 }
168a8f58 2250 BUG_ON(tp->fastopen_rsk != NULL);
435cf559 2251
cf60af03
YC
2252 /* If socket is aborted during connect operation */
2253 tcp_free_fastopen_req(tp);
2254
180d8cd9 2255 sk_sockets_allocated_dec(sk);
d1a4c0b3 2256 sock_release_memcg(sk);
1da177e4 2257}
1da177e4
LT
2258EXPORT_SYMBOL(tcp_v4_destroy_sock);
2259
2260#ifdef CONFIG_PROC_FS
2261/* Proc filesystem TCP sock list dumping. */
2262
3ab5aee7 2263static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1da177e4 2264{
3ab5aee7 2265 return hlist_nulls_empty(head) ? NULL :
8feaf0c0 2266 list_entry(head->first, struct inet_timewait_sock, tw_node);
1da177e4
LT
2267}
2268
8feaf0c0 2269static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1da177e4 2270{
3ab5aee7
ED
2271 return !is_a_nulls(tw->tw_node.next) ?
2272 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1da177e4
LT
2273}
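/* Editor's note (assumption, not in the original file): these helpers
 * walk nulls-terminated hash chains. An hlist_nulls chain ends in an
 * encoded marker rather than a plain NULL, so a lockless reader that a
 * concurrent rehash moved onto another chain can detect it: is_a_nulls()
 * exposes the marker, and the reader restarts when it does not match
 * the bucket it started from.
 */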
2274
a8b690f9
TH
2275/*
2276 * Get the next listener socket following cur. If cur is NULL, get the first socket
2277 * starting from bucket given in st->bucket; when st->bucket is zero the
2278 * very first socket in the hash table is returned.
2279 */
1da177e4
LT
2280static void *listening_get_next(struct seq_file *seq, void *cur)
2281{
463c84b9 2282 struct inet_connection_sock *icsk;
c25eb3bf 2283 struct hlist_nulls_node *node;
1da177e4 2284 struct sock *sk = cur;
5caea4ea 2285 struct inet_listen_hashbucket *ilb;
5799de0b 2286 struct tcp_iter_state *st = seq->private;
a4146b1b 2287 struct net *net = seq_file_net(seq);
1da177e4
LT
2288
2289 if (!sk) {
a8b690f9 2290 ilb = &tcp_hashinfo.listening_hash[st->bucket];
5caea4ea 2291 spin_lock_bh(&ilb->lock);
c25eb3bf 2292 sk = sk_nulls_head(&ilb->head);
a8b690f9 2293 st->offset = 0;
1da177e4
LT
2294 goto get_sk;
2295 }
5caea4ea 2296 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1da177e4 2297 ++st->num;
a8b690f9 2298 ++st->offset;
1da177e4
LT
2299
2300 if (st->state == TCP_SEQ_STATE_OPENREQ) {
60236fdd 2301 struct request_sock *req = cur;
1da177e4 2302
72a3effa 2303 icsk = inet_csk(st->syn_wait_sk);
1da177e4
LT
2304 req = req->dl_next;
2305 while (1) {
2306 while (req) {
bdccc4ca 2307 if (req->rsk_ops->family == st->family) {
1da177e4
LT
2308 cur = req;
2309 goto out;
2310 }
2311 req = req->dl_next;
2312 }
72a3effa 2313 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1da177e4
LT
2314 break;
2315get_req:
463c84b9 2316 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1da177e4 2317 }
1bde5ac4 2318 sk = sk_nulls_next(st->syn_wait_sk);
1da177e4 2319 st->state = TCP_SEQ_STATE_LISTENING;
463c84b9 2320 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 2321 } else {
e905a9ed 2322 icsk = inet_csk(sk);
463c84b9
ACM
2323 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2324 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1da177e4 2325 goto start_req;
463c84b9 2326 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1bde5ac4 2327 sk = sk_nulls_next(sk);
1da177e4
LT
2328 }
2329get_sk:
c25eb3bf 2330 sk_nulls_for_each_from(sk, node) {
8475ef9f
PE
2331 if (!net_eq(sock_net(sk), net))
2332 continue;
2333 if (sk->sk_family == st->family) {
1da177e4
LT
2334 cur = sk;
2335 goto out;
2336 }
e905a9ed 2337 icsk = inet_csk(sk);
463c84b9
ACM
2338 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2339 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1da177e4
LT
2340start_req:
2341 st->uid = sock_i_uid(sk);
2342 st->syn_wait_sk = sk;
2343 st->state = TCP_SEQ_STATE_OPENREQ;
2344 st->sbucket = 0;
2345 goto get_req;
2346 }
463c84b9 2347 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4 2348 }
5caea4ea 2349 spin_unlock_bh(&ilb->lock);
a8b690f9 2350 st->offset = 0;
0f7ff927 2351 if (++st->bucket < INET_LHTABLE_SIZE) {
5caea4ea
ED
2352 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2353 spin_lock_bh(&ilb->lock);
c25eb3bf 2354 sk = sk_nulls_head(&ilb->head);
1da177e4
LT
2355 goto get_sk;
2356 }
2357 cur = NULL;
2358out:
2359 return cur;
2360}
2361
2362static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2363{
a8b690f9
TH
2364 struct tcp_iter_state *st = seq->private;
2365 void *rc;
2366
2367 st->bucket = 0;
2368 st->offset = 0;
2369 rc = listening_get_next(seq, NULL);
1da177e4
LT
2370
2371 while (rc && *pos) {
2372 rc = listening_get_next(seq, rc);
2373 --*pos;
2374 }
2375 return rc;
2376}
2377
a2a385d6 2378static inline bool empty_bucket(struct tcp_iter_state *st)
6eac5604 2379{
3ab5aee7
ED
2380 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2381 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
6eac5604
AK
2382}
2383
a8b690f9
TH
2384/*
2385 * Get first established socket starting from bucket given in st->bucket.
2386 * If st->bucket is zero, the very first socket in the hash is returned.
2387 */
1da177e4
LT
2388static void *established_get_first(struct seq_file *seq)
2389{
5799de0b 2390 struct tcp_iter_state *st = seq->private;
a4146b1b 2391 struct net *net = seq_file_net(seq);
1da177e4
LT
2392 void *rc = NULL;
2393
a8b690f9
TH
2394 st->offset = 0;
2395 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1da177e4 2396 struct sock *sk;
3ab5aee7 2397 struct hlist_nulls_node *node;
8feaf0c0 2398 struct inet_timewait_sock *tw;
9db66bdc 2399 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1da177e4 2400
6eac5604
AK
2401 /* Lockless fast path for the common case of empty buckets */
2402 if (empty_bucket(st))
2403 continue;
2404
9db66bdc 2405 spin_lock_bh(lock);
3ab5aee7 2406 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
f40c8174 2407 if (sk->sk_family != st->family ||
878628fb 2408 !net_eq(sock_net(sk), net)) {
1da177e4
LT
2409 continue;
2410 }
2411 rc = sk;
2412 goto out;
2413 }
2414 st->state = TCP_SEQ_STATE_TIME_WAIT;
8feaf0c0 2415 inet_twsk_for_each(tw, node,
dbca9b27 2416 &tcp_hashinfo.ehash[st->bucket].twchain) {
28518fc1 2417 if (tw->tw_family != st->family ||
878628fb 2418 !net_eq(twsk_net(tw), net)) {
1da177e4
LT
2419 continue;
2420 }
2421 rc = tw;
2422 goto out;
2423 }
9db66bdc 2424 spin_unlock_bh(lock);
1da177e4
LT
2425 st->state = TCP_SEQ_STATE_ESTABLISHED;
2426 }
2427out:
2428 return rc;
2429}
2430
2431static void *established_get_next(struct seq_file *seq, void *cur)
2432{
2433 struct sock *sk = cur;
8feaf0c0 2434 struct inet_timewait_sock *tw;
3ab5aee7 2435 struct hlist_nulls_node *node;
5799de0b 2436 struct tcp_iter_state *st = seq->private;
a4146b1b 2437 struct net *net = seq_file_net(seq);
1da177e4
LT
2438
2439 ++st->num;
a8b690f9 2440 ++st->offset;
1da177e4
LT
2441
2442 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2443 tw = cur;
2444 tw = tw_next(tw);
2445get_tw:
878628fb 2446 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
1da177e4
LT
2447 tw = tw_next(tw);
2448 }
2449 if (tw) {
2450 cur = tw;
2451 goto out;
2452 }
9db66bdc 2453 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2454 st->state = TCP_SEQ_STATE_ESTABLISHED;
2455
6eac5604 2456 /* Look for the next non-empty bucket */
a8b690f9 2457 st->offset = 0;
f373b53b 2458 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
6eac5604
AK
2459 empty_bucket(st))
2460 ;
f373b53b 2461 if (st->bucket > tcp_hashinfo.ehash_mask)
6eac5604
AK
2462 return NULL;
2463
9db66bdc 2464 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
3ab5aee7 2465 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
1da177e4 2466 } else
3ab5aee7 2467 sk = sk_nulls_next(sk);
1da177e4 2468
3ab5aee7 2469 sk_nulls_for_each_from(sk, node) {
878628fb 2470 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1da177e4
LT
2471 goto found;
2472 }
2473
2474 st->state = TCP_SEQ_STATE_TIME_WAIT;
dbca9b27 2475 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
1da177e4
LT
2476 goto get_tw;
2477found:
2478 cur = sk;
2479out:
2480 return cur;
2481}
2482
2483static void *established_get_idx(struct seq_file *seq, loff_t pos)
2484{
a8b690f9
TH
2485 struct tcp_iter_state *st = seq->private;
2486 void *rc;
2487
2488 st->bucket = 0;
2489 rc = established_get_first(seq);
1da177e4
LT
2490
2491 while (rc && pos) {
2492 rc = established_get_next(seq, rc);
2493 --pos;
7174259e 2494 }
1da177e4
LT
2495 return rc;
2496}
2497
2498static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2499{
2500 void *rc;
5799de0b 2501 struct tcp_iter_state *st = seq->private;
1da177e4 2502
1da177e4
LT
2503 st->state = TCP_SEQ_STATE_LISTENING;
2504 rc = listening_get_idx(seq, &pos);
2505
2506 if (!rc) {
1da177e4
LT
2507 st->state = TCP_SEQ_STATE_ESTABLISHED;
2508 rc = established_get_idx(seq, pos);
2509 }
2510
2511 return rc;
2512}
2513
a8b690f9
TH
2514static void *tcp_seek_last_pos(struct seq_file *seq)
2515{
2516 struct tcp_iter_state *st = seq->private;
2517 int offset = st->offset;
2518 int orig_num = st->num;
2519 void *rc = NULL;
2520
2521 switch (st->state) {
2522 case TCP_SEQ_STATE_OPENREQ:
2523 case TCP_SEQ_STATE_LISTENING:
2524 if (st->bucket >= INET_LHTABLE_SIZE)
2525 break;
2526 st->state = TCP_SEQ_STATE_LISTENING;
2527 rc = listening_get_next(seq, NULL);
2528 while (offset-- && rc)
2529 rc = listening_get_next(seq, rc);
2530 if (rc)
2531 break;
2532 st->bucket = 0;
2533 /* Fallthrough */
2534 case TCP_SEQ_STATE_ESTABLISHED:
2535 case TCP_SEQ_STATE_TIME_WAIT:
2536 st->state = TCP_SEQ_STATE_ESTABLISHED;
2537 if (st->bucket > tcp_hashinfo.ehash_mask)
2538 break;
2539 rc = established_get_first(seq);
2540 while (offset-- && rc)
2541 rc = established_get_next(seq, rc);
2542 }
2543
2544 st->num = orig_num;
2545
2546 return rc;
2547}
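/* Editor's note (interpretation, not in the original file):
 * tcp_seek_last_pos() is the fast path for sequential reads of
 * /proc/net/tcp. st->bucket and st->offset record where the previous
 * read stopped, so resuming costs a walk of at most one hash bucket
 * instead of re-walking all st->num sockets from the top of the table.
 */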
2548
1da177e4
LT
2549static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2550{
5799de0b 2551 struct tcp_iter_state *st = seq->private;
a8b690f9
TH
2552 void *rc;
2553
2554 if (*pos && *pos == st->last_pos) {
2555 rc = tcp_seek_last_pos(seq);
2556 if (rc)
2557 goto out;
2558 }
2559
1da177e4
LT
2560 st->state = TCP_SEQ_STATE_LISTENING;
2561 st->num = 0;
a8b690f9
TH
2562 st->bucket = 0;
2563 st->offset = 0;
2564 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2565
2566out:
2567 st->last_pos = *pos;
2568 return rc;
1da177e4
LT
2569}
2570
2571static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2572{
a8b690f9 2573 struct tcp_iter_state *st = seq->private;
1da177e4 2574 void *rc = NULL;
1da177e4
LT
2575
2576 if (v == SEQ_START_TOKEN) {
2577 rc = tcp_get_idx(seq, 0);
2578 goto out;
2579 }
1da177e4
LT
2580
2581 switch (st->state) {
2582 case TCP_SEQ_STATE_OPENREQ:
2583 case TCP_SEQ_STATE_LISTENING:
2584 rc = listening_get_next(seq, v);
2585 if (!rc) {
1da177e4 2586 st->state = TCP_SEQ_STATE_ESTABLISHED;
a8b690f9
TH
2587 st->bucket = 0;
2588 st->offset = 0;
1da177e4
LT
2589 rc = established_get_first(seq);
2590 }
2591 break;
2592 case TCP_SEQ_STATE_ESTABLISHED:
2593 case TCP_SEQ_STATE_TIME_WAIT:
2594 rc = established_get_next(seq, v);
2595 break;
2596 }
2597out:
2598 ++*pos;
a8b690f9 2599 st->last_pos = *pos;
1da177e4
LT
2600 return rc;
2601}
2602
2603static void tcp_seq_stop(struct seq_file *seq, void *v)
2604{
5799de0b 2605 struct tcp_iter_state *st = seq->private;
1da177e4
LT
2606
2607 switch (st->state) {
2608 case TCP_SEQ_STATE_OPENREQ:
2609 if (v) {
463c84b9
ACM
2610 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2611 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1da177e4
LT
2612 }
2613 case TCP_SEQ_STATE_LISTENING:
2614 if (v != SEQ_START_TOKEN)
5caea4ea 2615 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
1da177e4
LT
2616 break;
2617 case TCP_SEQ_STATE_TIME_WAIT:
2618 case TCP_SEQ_STATE_ESTABLISHED:
2619 if (v)
9db66bdc 2620 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2621 break;
2622 }
2623}
2624
73cb88ec 2625int tcp_seq_open(struct inode *inode, struct file *file)
1da177e4
LT
2626{
2627 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
1da177e4 2628 struct tcp_iter_state *s;
52d6f3f1 2629 int err;
1da177e4 2630
52d6f3f1
DL
2631 err = seq_open_net(inode, file, &afinfo->seq_ops,
2632 sizeof(struct tcp_iter_state));
2633 if (err < 0)
2634 return err;
f40c8174 2635
52d6f3f1 2636 s = ((struct seq_file *)file->private_data)->private;
1da177e4 2637 s->family = afinfo->family;
a8b690f9 2638 s->last_pos = 0;
f40c8174
DL
2639 return 0;
2640}
73cb88ec 2641EXPORT_SYMBOL(tcp_seq_open);
f40c8174 2642
6f8b13bc 2643int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4
LT
2644{
2645 int rc = 0;
2646 struct proc_dir_entry *p;
2647
9427c4b3
DL
2648 afinfo->seq_ops.start = tcp_seq_start;
2649 afinfo->seq_ops.next = tcp_seq_next;
2650 afinfo->seq_ops.stop = tcp_seq_stop;
2651
84841c3c 2652 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
73cb88ec 2653 afinfo->seq_fops, afinfo);
84841c3c 2654 if (!p)
1da177e4
LT
2655 rc = -ENOMEM;
2656 return rc;
2657}
4bc2f18b 2658EXPORT_SYMBOL(tcp_proc_register);
1da177e4 2659
6f8b13bc 2660void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4 2661{
ece31ffd 2662 remove_proc_entry(afinfo->name, net->proc_net);
1da177e4 2663}
4bc2f18b 2664EXPORT_SYMBOL(tcp_proc_unregister);
1da177e4 2665
cf533ea5 2666static void get_openreq4(const struct sock *sk, const struct request_sock *req,
a7cb5a49 2667 struct seq_file *f, int i, kuid_t uid, int *len)
1da177e4 2668{
2e6599cb 2669 const struct inet_request_sock *ireq = inet_rsk(req);
a399a805 2670 long delta = req->expires - jiffies;
1da177e4 2671
5e659e4c 2672 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
71338aa7 2673 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
1da177e4 2674 i,
2e6599cb 2675 ireq->loc_addr,
c720c7e8 2676 ntohs(inet_sk(sk)->inet_sport),
2e6599cb
ACM
2677 ireq->rmt_addr,
2678 ntohs(ireq->rmt_port),
1da177e4
LT
2679 TCP_SYN_RECV,
2680 0, 0, /* could print option size, but that is af dependent. */
2681 1, /* timers active (only the expire timer) */
a399a805 2682 jiffies_delta_to_clock_t(delta),
e6c022a4 2683 req->num_timeout,
a7cb5a49 2684 from_kuid_munged(seq_user_ns(f), uid),
1da177e4
LT
2685 0, /* non standard timer */
2686 0, /* open_requests have no inode */
2687 atomic_read(&sk->sk_refcnt),
5e659e4c
PE
2688 req,
2689 len);
1da177e4
LT
2690}
2691
5e659e4c 2692static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
1da177e4
LT
2693{
2694 int timer_active;
2695 unsigned long timer_expires;
cf533ea5 2696 const struct tcp_sock *tp = tcp_sk(sk);
cf4c6bf8 2697 const struct inet_connection_sock *icsk = inet_csk(sk);
cf533ea5 2698 const struct inet_sock *inet = inet_sk(sk);
168a8f58 2699 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
c720c7e8
ED
2700 __be32 dest = inet->inet_daddr;
2701 __be32 src = inet->inet_rcv_saddr;
2702 __u16 destp = ntohs(inet->inet_dport);
2703 __u16 srcp = ntohs(inet->inet_sport);
49d09007 2704 int rx_queue;
1da177e4 2705
6ba8a3b1
ND
2706 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2707 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2708 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1da177e4 2709 timer_active = 1;
463c84b9
ACM
2710 timer_expires = icsk->icsk_timeout;
2711 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2712 timer_active = 4;
463c84b9 2713 timer_expires = icsk->icsk_timeout;
cf4c6bf8 2714 } else if (timer_pending(&sk->sk_timer)) {
1da177e4 2715 timer_active = 2;
cf4c6bf8 2716 timer_expires = sk->sk_timer.expires;
1da177e4
LT
2717 } else {
2718 timer_active = 0;
2719 timer_expires = jiffies;
2720 }
2721
49d09007
ED
2722 if (sk->sk_state == TCP_LISTEN)
2723 rx_queue = sk->sk_ack_backlog;
2724 else
2725 /*
2726 * Because we don't lock the socket, we might find a transient negative value.
2727 */
2728 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
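	/* Worked example (editor's addition): rcv_nxt and copied_seq are
	 * read without the socket lock, so a reader racing with the
	 * receive path may see e.g. a stale rcv_nxt of 1000 after
	 * copied_seq has advanced to 1005; max_t() clamps the resulting
	 * -5 to 0 instead of reporting a bogus rx_queue.
	 */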
2729
5e659e4c 2730 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
71338aa7 2731 "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
cf4c6bf8 2732 i, src, srcp, dest, destp, sk->sk_state,
47da8ee6 2733 tp->write_seq - tp->snd_una,
49d09007 2734 rx_queue,
1da177e4 2735 timer_active,
a399a805 2736 jiffies_delta_to_clock_t(timer_expires - jiffies),
463c84b9 2737 icsk->icsk_retransmits,
a7cb5a49 2738 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
6687e988 2739 icsk->icsk_probes_out,
cf4c6bf8
IJ
2740 sock_i_ino(sk),
2741 atomic_read(&sk->sk_refcnt), sk,
7be87351
SH
2742 jiffies_to_clock_t(icsk->icsk_rto),
2743 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2744 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1da177e4 2745 tp->snd_cwnd,
168a8f58
JC
2746 sk->sk_state == TCP_LISTEN ?
2747 (fastopenq ? fastopenq->max_qlen : 0) :
2748 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
5e659e4c 2749 len);
1da177e4
LT
2750}
2751
cf533ea5 2752static void get_timewait4_sock(const struct inet_timewait_sock *tw,
5e659e4c 2753 struct seq_file *f, int i, int *len)
1da177e4 2754{
23f33c2d 2755 __be32 dest, src;
1da177e4 2756 __u16 destp, srcp;
a399a805 2757 long delta = tw->tw_ttd - jiffies;
1da177e4
LT
2758
2759 dest = tw->tw_daddr;
2760 src = tw->tw_rcv_saddr;
2761 destp = ntohs(tw->tw_dport);
2762 srcp = ntohs(tw->tw_sport);
2763
5e659e4c 2764 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
71338aa7 2765 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
1da177e4 2766 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
a399a805 2767 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
5e659e4c 2768 atomic_read(&tw->tw_refcnt), tw, len);
1da177e4
LT
2769}
2770
2771#define TMPSZ 150
2772
2773static int tcp4_seq_show(struct seq_file *seq, void *v)
2774{
5799de0b 2775 struct tcp_iter_state *st;
5e659e4c 2776 int len;
1da177e4
LT
2777
2778 if (v == SEQ_START_TOKEN) {
2779 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2780 " sl local_address rem_address st tx_queue "
2781 "rx_queue tr tm->when retrnsmt uid timeout "
2782 "inode");
2783 goto out;
2784 }
2785 st = seq->private;
2786
2787 switch (st->state) {
2788 case TCP_SEQ_STATE_LISTENING:
2789 case TCP_SEQ_STATE_ESTABLISHED:
5e659e4c 2790 get_tcp4_sock(v, seq, st->num, &len);
1da177e4
LT
2791 break;
2792 case TCP_SEQ_STATE_OPENREQ:
5e659e4c 2793 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
1da177e4
LT
2794 break;
2795 case TCP_SEQ_STATE_TIME_WAIT:
5e659e4c 2796 get_timewait4_sock(v, seq, st->num, &len);
1da177e4
LT
2797 break;
2798 }
5e659e4c 2799 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
1da177e4
LT
2800out:
2801 return 0;
2802}
2803
73cb88ec
AV
2804static const struct file_operations tcp_afinfo_seq_fops = {
2805 .owner = THIS_MODULE,
2806 .open = tcp_seq_open,
2807 .read = seq_read,
2808 .llseek = seq_lseek,
2809 .release = seq_release_net
2810};
2811
1da177e4 2812static struct tcp_seq_afinfo tcp4_seq_afinfo = {
1da177e4
LT
2813 .name = "tcp",
2814 .family = AF_INET,
73cb88ec 2815 .seq_fops = &tcp_afinfo_seq_fops,
9427c4b3
DL
2816 .seq_ops = {
2817 .show = tcp4_seq_show,
2818 },
1da177e4
LT
2819};
2820
2c8c1e72 2821static int __net_init tcp4_proc_init_net(struct net *net)
757764f6
PE
2822{
2823 return tcp_proc_register(net, &tcp4_seq_afinfo);
2824}
2825
2c8c1e72 2826static void __net_exit tcp4_proc_exit_net(struct net *net)
757764f6
PE
2827{
2828 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2829}
2830
2831static struct pernet_operations tcp4_net_ops = {
2832 .init = tcp4_proc_init_net,
2833 .exit = tcp4_proc_exit_net,
2834};
2835
1da177e4
LT
2836int __init tcp4_proc_init(void)
2837{
757764f6 2838 return register_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2839}
2840
2841void tcp4_proc_exit(void)
2842{
757764f6 2843 unregister_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2844}
2845#endif /* CONFIG_PROC_FS */
2846
bf296b12
HX
2847struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2848{
b71d1d42 2849 const struct iphdr *iph = skb_gro_network_header(skb);
861b6501
ED
2850 __wsum wsum;
2851 __sum16 sum;
bf296b12
HX
2852
2853 switch (skb->ip_summed) {
2854 case CHECKSUM_COMPLETE:
86911732 2855 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
bf296b12
HX
2856 skb->csum)) {
2857 skb->ip_summed = CHECKSUM_UNNECESSARY;
2858 break;
2859 }
861b6501 2860flush:
bf296b12
HX
2861 NAPI_GRO_CB(skb)->flush = 1;
2862 return NULL;
861b6501
ED
2863
2864 case CHECKSUM_NONE:
2865 wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
2866 skb_gro_len(skb), IPPROTO_TCP, 0);
2867 sum = csum_fold(skb_checksum(skb,
2868 skb_gro_offset(skb),
2869 skb_gro_len(skb),
2870 wsum));
2871 if (sum)
2872 goto flush;
2873
2874 skb->ip_summed = CHECKSUM_UNNECESSARY;
2875 break;
bf296b12
HX
2876 }
2877
2878 return tcp_gro_receive(head, skb);
2879}
bf296b12
HX
2880
2881int tcp4_gro_complete(struct sk_buff *skb)
2882{
b71d1d42 2883 const struct iphdr *iph = ip_hdr(skb);
bf296b12
HX
2884 struct tcphdr *th = tcp_hdr(skb);
2885
2886 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2887 iph->saddr, iph->daddr, 0);
2888 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2889
2890 return tcp_gro_complete(skb);
2891}
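/* Editor's note (sketch, not in the original file): once GRO has merged
 * segments, the original checksums no longer cover the super-packet, so
 * tcp4_gro_complete() re-seeds th->check with the one's-complement
 * pseudo-header sum and marks the skb SKB_GSO_TCPV4. When the packet is
 * later resegmented, the device or the GSO fallback completes each
 * segment's checksum starting from that pseudo-header seed.
 */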
bf296b12 2892
1da177e4
LT
2893struct proto tcp_prot = {
2894 .name = "TCP",
2895 .owner = THIS_MODULE,
2896 .close = tcp_close,
2897 .connect = tcp_v4_connect,
2898 .disconnect = tcp_disconnect,
463c84b9 2899 .accept = inet_csk_accept,
1da177e4
LT
2900 .ioctl = tcp_ioctl,
2901 .init = tcp_v4_init_sock,
2902 .destroy = tcp_v4_destroy_sock,
2903 .shutdown = tcp_shutdown,
2904 .setsockopt = tcp_setsockopt,
2905 .getsockopt = tcp_getsockopt,
1da177e4 2906 .recvmsg = tcp_recvmsg,
7ba42910
CG
2907 .sendmsg = tcp_sendmsg,
2908 .sendpage = tcp_sendpage,
1da177e4 2909 .backlog_rcv = tcp_v4_do_rcv,
46d3ceab 2910 .release_cb = tcp_release_cb,
563d34d0 2911 .mtu_reduced = tcp_v4_mtu_reduced,
ab1e0a13
ACM
2912 .hash = inet_hash,
2913 .unhash = inet_unhash,
2914 .get_port = inet_csk_get_port,
1da177e4
LT
2915 .enter_memory_pressure = tcp_enter_memory_pressure,
2916 .sockets_allocated = &tcp_sockets_allocated,
0a5578cf 2917 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2918 .memory_allocated = &tcp_memory_allocated,
2919 .memory_pressure = &tcp_memory_pressure,
1da177e4
LT
2920 .sysctl_wmem = sysctl_tcp_wmem,
2921 .sysctl_rmem = sysctl_tcp_rmem,
2922 .max_header = MAX_TCP_HEADER,
2923 .obj_size = sizeof(struct tcp_sock),
3ab5aee7 2924 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2925 .twsk_prot = &tcp_timewait_sock_ops,
60236fdd 2926 .rsk_prot = &tcp_request_sock_ops,
39d8cda7 2927 .h.hashinfo = &tcp_hashinfo,
7ba42910 2928 .no_autobind = true,
543d9cfe
ACM
2929#ifdef CONFIG_COMPAT
2930 .compat_setsockopt = compat_tcp_setsockopt,
2931 .compat_getsockopt = compat_tcp_getsockopt,
2932#endif
c255a458 2933#ifdef CONFIG_MEMCG_KMEM
d1a4c0b3
GC
2934 .init_cgroup = tcp_init_cgroup,
2935 .destroy_cgroup = tcp_destroy_cgroup,
2936 .proto_cgroup = tcp_proto_cgroup,
2937#endif
1da177e4 2938};
4bc2f18b 2939EXPORT_SYMBOL(tcp_prot);
1da177e4 2940
046ee902
DL
2941static int __net_init tcp_sk_init(struct net *net)
2942{
5d134f1c 2943 net->ipv4.sysctl_tcp_ecn = 2;
be9f4a44 2944 return 0;
046ee902
DL
2945}
2946
2947static void __net_exit tcp_sk_exit(struct net *net)
2948{
b099ce26
EB
2949}
2950
2951static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2952{
2953 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
046ee902
DL
2954}
2955
2956static struct pernet_operations __net_initdata tcp_sk_ops = {
b099ce26
EB
2957 .init = tcp_sk_init,
2958 .exit = tcp_sk_exit,
2959 .exit_batch = tcp_sk_exit_batch,
046ee902
DL
2960};
2961
9b0f976f 2962void __init tcp_v4_init(void)
1da177e4 2963{
5caea4ea 2964 inet_hashinfo_init(&tcp_hashinfo);
6a1b3054 2965 if (register_pernet_subsys(&tcp_sk_ops))
1da177e4 2966 panic("Failed to create the TCP control socket.\n");
1da177e4 2967}