tcp/dccp: do not touch listener sk_refcnt under synflood
net/ipv4/tcp_ipv4.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

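/*
 * Note on tcp_twsk_unique() above: advancing write_seq by 65535 + 2 puts the
 * new connection's initial sequence number beyond anything the old connection
 * could have sent from this port pair within its send window, so stray
 * duplicate segments cannot be confused with new data even before PAWS
 * timestamps are in effect.
 */
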
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576 bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

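/*
 * In __tcp_v4_send_check() above, CHECKSUM_PARTIAL means the device (or a
 * software fallback) will finish the checksum: only the pseudo-header sum is
 * stored in th->check, and csum_start/csum_offset tell the stack where to
 * fold in the sum over the TCP header and payload. The else branch computes
 * the full checksum in software for paths without offload.
 */
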
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So we build the reply based only on parameters
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;


		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;

	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct net *net,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sock_net(sk), skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
						 tcp_sk(sk)->snd_nxt;

	tcp_v4_send_ack(sock_net(sk), skb, seq,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk) ||
					   lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}

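/*
 * Illustrative userspace counterpart (not part of this file): a key for a
 * given peer reaches tcp_v4_parse_md5_keys() above via the TCP_MD5SIG
 * socket option, roughly as follows (error handling omitted):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */
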
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}

static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req)
		tcp_move_syn(newtp, req);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force_safe(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
						POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);

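/*
 * tcp_prequeue() above is bypassed entirely when the tcp_low_latency sysctl
 * is set or no task is blocked in recvmsg(): in that case segments take the
 * regular tcp_v4_do_rcv() path in softirq context instead of being deferred
 * to the reader's process context.
 */
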
1da177e4
LT
1535/*
1536 * From tcp_input.c
1537 */
1538
1539int tcp_v4_rcv(struct sk_buff *skb)
1540{
3b24d854 1541 struct net *net = dev_net(skb->dev);
eddc9ec5 1542 const struct iphdr *iph;
cf533ea5 1543 const struct tcphdr *th;
3b24d854 1544 bool refcounted;
1da177e4
LT
1545 struct sock *sk;
1546 int ret;
1547
1548 if (skb->pkt_type != PACKET_HOST)
1549 goto discard_it;
1550
1551 /* Count it even if it's bad */
63231bdd 1552 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1553
1554 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1555 goto discard_it;
1556
aa8223c7 1557 th = tcp_hdr(skb);
1da177e4
LT
1558
1559 if (th->doff < sizeof(struct tcphdr) / 4)
1560 goto bad_packet;
1561 if (!pskb_may_pull(skb, th->doff * 4))
1562 goto discard_it;
1563
1564 /* An explanation is required here, I think.
1565 * Packet length and doff are validated by header prediction,
caa20d9a 1566 * provided case of th->doff==0 is eliminated.
1da177e4 1567 * So, we defer the checks. */
ed70fcfc
TH
1568
1569 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
6a5dc9e5 1570 goto csum_error;
1da177e4 1571
aa8223c7 1572 th = tcp_hdr(skb);
eddc9ec5 1573 iph = ip_hdr(skb);
971f10ec
ED
1574 /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
1575 * barrier() makes sure compiler wont play fool^Waliasing games.
1576 */
1577 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1578 sizeof(struct inet_skb_parm));
1579 barrier();
1580
1da177e4
LT
1581 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1582 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1583 skb->len - th->doff * 4);
1584 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
e11ecddf 1585 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
04317daf 1586 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
b82d1bb4 1587 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1da177e4
LT
1588 TCP_SKB_CB(skb)->sacked = 0;
1589
4bdc3d66 1590lookup:
a583636a 1591 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
3b24d854 1592 th->dest, &refcounted);
1da177e4
LT
1593 if (!sk)
1594 goto no_tcp_socket;
1595
bb134d5d
ED
1596process:
1597 if (sk->sk_state == TCP_TIME_WAIT)
1598 goto do_time_wait;
1599
079096f1
ED
1600 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1601 struct request_sock *req = inet_reqsk(sk);
7716682c 1602 struct sock *nsk;
079096f1
ED
1603
1604 sk = req->rsk_listener;
72923555
ED
1605 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1606 reqsk_put(req);
1607 goto discard_it;
1608 }
7716682c 1609 if (unlikely(sk->sk_state != TCP_LISTEN)) {
f03f2e15 1610 inet_csk_reqsk_queue_drop_and_put(sk, req);
4bdc3d66
ED
1611 goto lookup;
1612 }
3b24d854
ED
1613 /* We own a reference on the listener, increase it again
1614 * as we might lose it too soon.
1615 */
7716682c 1616 sock_hold(sk);
3b24d854 1617 refcounted = true;
7716682c 1618 nsk = tcp_check_req(sk, skb, req, false);
079096f1
ED
1619 if (!nsk) {
1620 reqsk_put(req);
7716682c 1621 goto discard_and_relse;
079096f1
ED
1622 }
1623 if (nsk == sk) {
079096f1
ED
1624 reqsk_put(req);
1625 } else if (tcp_child_process(sk, nsk, skb)) {
1626 tcp_v4_send_reset(nsk, skb);
7716682c 1627 goto discard_and_relse;
079096f1 1628 } else {
7716682c 1629 sock_put(sk);
079096f1
ED
1630 return 0;
1631 }
1632 }
6cce09f8
ED
1633 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1634 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
d218d111 1635 goto discard_and_relse;
6cce09f8 1636 }
d218d111 1637
1da177e4
LT
1638 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1639 goto discard_and_relse;
9ea88a15 1640
9ea88a15
DP
1641 if (tcp_v4_inbound_md5_hash(sk, skb))
1642 goto discard_and_relse;
9ea88a15 1643
b59c2701 1644 nf_reset(skb);
1da177e4 1645
fda9ef5d 1646 if (sk_filter(sk, skb))
1da177e4
LT
1647 goto discard_and_relse;
1648
1649 skb->dev = NULL;
1650
e994b2f0
ED
1651 if (sk->sk_state == TCP_LISTEN) {
1652 ret = tcp_v4_do_rcv(sk, skb);
1653 goto put_and_return;
1654 }
1655
1656 sk_incoming_cpu_update(sk);
1657
c6366184 1658 bh_lock_sock_nested(sk);
a44d6eac 1659 tcp_segs_in(tcp_sk(sk), skb);
1da177e4
LT
1660 ret = 0;
1661 if (!sock_owned_by_user(sk)) {
7bced397 1662 if (!tcp_prequeue(sk, skb))
1da177e4 1663 ret = tcp_v4_do_rcv(sk, skb);
da882c1f
ED
1664 } else if (unlikely(sk_add_backlog(sk, skb,
1665 sk->sk_rcvbuf + sk->sk_sndbuf))) {
6b03a53a 1666 bh_unlock_sock(sk);
6cce09f8 1667 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
6b03a53a
ZY
1668 goto discard_and_relse;
1669 }
1da177e4
LT
1670 bh_unlock_sock(sk);
1671
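	/* Note (not in the upstream file): with the socket bh-locked, a segment
	 * takes one of three paths -- immediate processing through
	 * tcp_v4_do_rcv() when the socket is unowned and no reader is parked on
	 * the prequeue, the prequeue when a reader is blocked in tcp_recvmsg(),
	 * or the backlog (bounded by sk_rcvbuf + sk_sndbuf) when the owner
	 * currently holds the socket lock.
	 */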
e994b2f0 1672put_and_return:
3b24d854
ED
1673 if (refcounted)
1674 sock_put(sk);
1da177e4
LT
1675
1676 return ret;
1677
1678no_tcp_socket:
1679 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1680 goto discard_it;
1681
12e25e10 1682 if (tcp_checksum_complete(skb)) {
6a5dc9e5
ED
1683csum_error:
1684 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1da177e4 1685bad_packet:
63231bdd 1686 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1687 } else {
cfb6eeb4 1688 tcp_v4_send_reset(NULL, skb);
1da177e4
LT
1689 }
1690
1691discard_it:
1692 /* Discard frame. */
1693 kfree_skb(skb);
e905a9ed 1694 return 0;
1da177e4
LT
1695
1696discard_and_relse:
3b24d854
ED
1697 if (refcounted)
1698 sock_put(sk);
1da177e4
LT
1699 goto discard_it;
1700
1701do_time_wait:
1702 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1703 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1704 goto discard_it;
1705 }
1706
6a5dc9e5
ED
1707 if (tcp_checksum_complete(skb)) {
1708 inet_twsk_put(inet_twsk(sk));
1709 goto csum_error;
1da177e4 1710 }
9469c7b4 1711 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4 1712 case TCP_TW_SYN: {
c346dca1 1713 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
a583636a
CG
1714 &tcp_hashinfo, skb,
1715 __tcp_hdrlen(th),
da5e3630 1716 iph->saddr, th->source,
eddc9ec5 1717 iph->daddr, th->dest,
463c84b9 1718 inet_iif(skb));
1da177e4 1719 if (sk2) {
dbe7faa4 1720 inet_twsk_deschedule_put(inet_twsk(sk));
1da177e4 1721 sk = sk2;
3b24d854 1722 refcounted = false;
1da177e4
LT
1723 goto process;
1724 }
1725 /* Fall through to ACK */
1726 }
1727 case TCP_TW_ACK:
1728 tcp_v4_timewait_ack(sk, skb);
1729 break;
1730 case TCP_TW_RST:
271c3b9b
FW
1731 tcp_v4_send_reset(sk, skb);
1732 inet_twsk_deschedule_put(inet_twsk(sk));
1733 goto discard_it;
1da177e4
LT
1734 case TCP_TW_SUCCESS:;
1735 }
1736 goto discard_it;
1737}
1738
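/* Note (not in the upstream file): the TCP_TW_SYN case above covers the
 * RFC 1122 allowance for reopening a TIME_WAIT pair.  When
 * tcp_timewait_state_process() accepts the new SYN, the packet is redirected
 * to a current listener found by inet_lookup_listener(), the old timewait
 * socket is removed via inet_twsk_deschedule_put(), and processing restarts
 * at the process label with the listener as sk.
 */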
ccb7c410
DM
1739static struct timewait_sock_ops tcp_timewait_sock_ops = {
1740 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1741 .twsk_unique = tcp_twsk_unique,
1742 .twsk_destructor= tcp_twsk_destructor,
ccb7c410 1743};
1da177e4 1744
63d02d15 1745void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
5d299f3d
ED
1746{
1747 struct dst_entry *dst = skb_dst(skb);
1748
5037e9ef 1749 if (dst && dst_hold_safe(dst)) {
ca777eff
ED
1750 sk->sk_rx_dst = dst;
1751 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1752 }
5d299f3d 1753}
63d02d15 1754EXPORT_SYMBOL(inet_sk_rx_dst_set);
5d299f3d 1755
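/* Note (not in the upstream file): caching the input route in sk->sk_rx_dst
 * lets established-state receive skip a full route lookup per packet.
 * dst_hold_safe() only takes the reference if the dst has not already been
 * released, and rx_dst_ifindex records the ingress device so the cached
 * route can be invalidated if packets start arriving on another interface.
 */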
3b401a81 1756const struct inet_connection_sock_af_ops ipv4_specific = {
543d9cfe
ACM
1757 .queue_xmit = ip_queue_xmit,
1758 .send_check = tcp_v4_send_check,
1759 .rebuild_header = inet_sk_rebuild_header,
5d299f3d 1760 .sk_rx_dst_set = inet_sk_rx_dst_set,
543d9cfe
ACM
1761 .conn_request = tcp_v4_conn_request,
1762 .syn_recv_sock = tcp_v4_syn_recv_sock,
543d9cfe
ACM
1763 .net_header_len = sizeof(struct iphdr),
1764 .setsockopt = ip_setsockopt,
1765 .getsockopt = ip_getsockopt,
1766 .addr2sockaddr = inet_csk_addr2sockaddr,
1767 .sockaddr_len = sizeof(struct sockaddr_in),
ab1e0a13 1768 .bind_conflict = inet_csk_bind_conflict,
3fdadf7d 1769#ifdef CONFIG_COMPAT
543d9cfe
ACM
1770 .compat_setsockopt = compat_ip_setsockopt,
1771 .compat_getsockopt = compat_ip_getsockopt,
3fdadf7d 1772#endif
4fab9071 1773 .mtu_reduced = tcp_v4_mtu_reduced,
1da177e4 1774};
4bc2f18b 1775EXPORT_SYMBOL(ipv4_specific);
1da177e4 1776
cfb6eeb4 1777#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1778static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
cfb6eeb4 1779 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1780 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4 1781 .md5_parse = tcp_v4_parse_md5_keys,
cfb6eeb4 1782};
b6332e6c 1783#endif
cfb6eeb4 1784
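The md5_parse hook above is reached from the TCP_MD5SIG socket option. A hedged userspace sketch of installing an MD5 key for a peer (hypothetical peer address and key; assumes <linux/tcp.h> provides struct tcp_md5sig, as on Linux):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/tcp.h>		/* struct tcp_md5sig, TCP_MD5SIG */

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct tcp_md5sig md5;
	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
	const char *key = "example-key";	/* hypothetical shared secret */

	if (fd < 0)
		return 1;

	memset(&md5, 0, sizeof(md5));
	peer->sin_family = AF_INET;
	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);	/* documentation address */
	md5.tcpm_keylen = strlen(key);
	memcpy(md5.tcpm_key, key, md5.tcpm_keylen);

	if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5)) < 0) {
		perror("setsockopt(TCP_MD5SIG)");
		return 1;
	}
	printf("MD5 key installed for 192.0.2.1\n");
	close(fd);
	return 0;
}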
1da177e4
LT
 1785/* NOTE: A lot of things are set to zero explicitly by the call to
 1786 * sk_alloc(), so they need not be done here.
1787 */
1788static int tcp_v4_init_sock(struct sock *sk)
1789{
6687e988 1790 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 1791
900f65d3 1792 tcp_init_sock(sk);
1da177e4 1793
8292a17a 1794 icsk->icsk_af_ops = &ipv4_specific;
900f65d3 1795
cfb6eeb4 1796#ifdef CONFIG_TCP_MD5SIG
ac807fa8 1797 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
cfb6eeb4 1798#endif
1da177e4 1799
1da177e4
LT
1800 return 0;
1801}
1802
7d06b2e0 1803void tcp_v4_destroy_sock(struct sock *sk)
1da177e4
LT
1804{
1805 struct tcp_sock *tp = tcp_sk(sk);
1806
1807 tcp_clear_xmit_timers(sk);
1808
6687e988 1809 tcp_cleanup_congestion_control(sk);
317a76f9 1810
1da177e4 1811 /* Clean up the write buffer. */
fe067e8a 1812 tcp_write_queue_purge(sk);
1da177e4
LT
1813
1814 /* Cleans up our, hopefully empty, out_of_order_queue. */
e905a9ed 1815 __skb_queue_purge(&tp->out_of_order_queue);
1da177e4 1816
cfb6eeb4
YH
1817#ifdef CONFIG_TCP_MD5SIG
1818 /* Clean up the MD5 key list, if any */
1819 if (tp->md5sig_info) {
a915da9b 1820 tcp_clear_md5_list(sk);
a8afca03 1821 kfree_rcu(tp->md5sig_info, rcu);
cfb6eeb4
YH
1822 tp->md5sig_info = NULL;
1823 }
1824#endif
1a2449a8 1825
1da177e4
LT
 1826 /* Clean up the prequeue; it should really be empty. */
1827 __skb_queue_purge(&tp->ucopy.prequeue);
1828
1829 /* Clean up a referenced TCP bind bucket. */
463c84b9 1830 if (inet_csk(sk)->icsk_bind_hash)
ab1e0a13 1831 inet_put_port(sk);
1da177e4 1832
00db4124 1833 BUG_ON(tp->fastopen_rsk);
435cf559 1834
cf60af03
YC
 1835 /* If the socket was aborted during the connect operation */
1836 tcp_free_fastopen_req(tp);
cd8ae852 1837 tcp_saved_syn_free(tp);
cf60af03 1838
180d8cd9 1839 sk_sockets_allocated_dec(sk);
3d596f7b 1840
baac50bb 1841 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3d596f7b 1842 sock_release_memcg(sk);
1da177e4 1843}
1da177e4
LT
1844EXPORT_SYMBOL(tcp_v4_destroy_sock);
1845
1846#ifdef CONFIG_PROC_FS
1847/* Proc filesystem TCP sock list dumping. */
1848
a8b690f9
TH
1849/*
 1850 * Get the next listener socket following cur. If cur is NULL, get the first socket
1851 * starting from bucket given in st->bucket; when st->bucket is zero the
1852 * very first socket in the hash table is returned.
1853 */
1da177e4
LT
1854static void *listening_get_next(struct seq_file *seq, void *cur)
1855{
5799de0b 1856 struct tcp_iter_state *st = seq->private;
a4146b1b 1857 struct net *net = seq_file_net(seq);
3b24d854
ED
1858 struct inet_listen_hashbucket *ilb;
1859 struct inet_connection_sock *icsk;
1860 struct sock *sk = cur;
1da177e4
LT
1861
1862 if (!sk) {
3b24d854 1863get_head:
a8b690f9 1864 ilb = &tcp_hashinfo.listening_hash[st->bucket];
5caea4ea 1865 spin_lock_bh(&ilb->lock);
3b24d854 1866 sk = sk_head(&ilb->head);
a8b690f9 1867 st->offset = 0;
1da177e4
LT
1868 goto get_sk;
1869 }
5caea4ea 1870 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1da177e4 1871 ++st->num;
a8b690f9 1872 ++st->offset;
1da177e4 1873
3b24d854 1874 sk = sk_next(sk);
1da177e4 1875get_sk:
3b24d854 1876 sk_for_each_from(sk) {
8475ef9f
PE
1877 if (!net_eq(sock_net(sk), net))
1878 continue;
3b24d854
ED
1879 if (sk->sk_family == st->family)
1880 return sk;
e905a9ed 1881 icsk = inet_csk(sk);
1da177e4 1882 }
5caea4ea 1883 spin_unlock_bh(&ilb->lock);
a8b690f9 1884 st->offset = 0;
3b24d854
ED
1885 if (++st->bucket < INET_LHTABLE_SIZE)
1886 goto get_head;
1887 return NULL;
1da177e4
LT
1888}
1889
1890static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1891{
a8b690f9
TH
1892 struct tcp_iter_state *st = seq->private;
1893 void *rc;
1894
1895 st->bucket = 0;
1896 st->offset = 0;
1897 rc = listening_get_next(seq, NULL);
1da177e4
LT
1898
1899 while (rc && *pos) {
1900 rc = listening_get_next(seq, rc);
1901 --*pos;
1902 }
1903 return rc;
1904}
1905
05dbc7b5 1906static inline bool empty_bucket(const struct tcp_iter_state *st)
6eac5604 1907{
05dbc7b5 1908 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
6eac5604
AK
1909}
1910
a8b690f9
TH
1911/*
1912 * Get first established socket starting from bucket given in st->bucket.
1913 * If st->bucket is zero, the very first socket in the hash is returned.
1914 */
1da177e4
LT
1915static void *established_get_first(struct seq_file *seq)
1916{
5799de0b 1917 struct tcp_iter_state *st = seq->private;
a4146b1b 1918 struct net *net = seq_file_net(seq);
1da177e4
LT
1919 void *rc = NULL;
1920
a8b690f9
TH
1921 st->offset = 0;
1922 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1da177e4 1923 struct sock *sk;
3ab5aee7 1924 struct hlist_nulls_node *node;
9db66bdc 1925 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1da177e4 1926
6eac5604
AK
1927 /* Lockless fast path for the common case of empty buckets */
1928 if (empty_bucket(st))
1929 continue;
1930
9db66bdc 1931 spin_lock_bh(lock);
3ab5aee7 1932 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
f40c8174 1933 if (sk->sk_family != st->family ||
878628fb 1934 !net_eq(sock_net(sk), net)) {
1da177e4
LT
1935 continue;
1936 }
1937 rc = sk;
1938 goto out;
1939 }
9db66bdc 1940 spin_unlock_bh(lock);
1da177e4
LT
1941 }
1942out:
1943 return rc;
1944}
1945
1946static void *established_get_next(struct seq_file *seq, void *cur)
1947{
1948 struct sock *sk = cur;
3ab5aee7 1949 struct hlist_nulls_node *node;
5799de0b 1950 struct tcp_iter_state *st = seq->private;
a4146b1b 1951 struct net *net = seq_file_net(seq);
1da177e4
LT
1952
1953 ++st->num;
a8b690f9 1954 ++st->offset;
1da177e4 1955
05dbc7b5 1956 sk = sk_nulls_next(sk);
1da177e4 1957
3ab5aee7 1958 sk_nulls_for_each_from(sk, node) {
878628fb 1959 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
05dbc7b5 1960 return sk;
1da177e4
LT
1961 }
1962
05dbc7b5
ED
1963 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1964 ++st->bucket;
1965 return established_get_first(seq);
1da177e4
LT
1966}
1967
1968static void *established_get_idx(struct seq_file *seq, loff_t pos)
1969{
a8b690f9
TH
1970 struct tcp_iter_state *st = seq->private;
1971 void *rc;
1972
1973 st->bucket = 0;
1974 rc = established_get_first(seq);
1da177e4
LT
1975
1976 while (rc && pos) {
1977 rc = established_get_next(seq, rc);
1978 --pos;
7174259e 1979 }
1da177e4
LT
1980 return rc;
1981}
1982
1983static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1984{
1985 void *rc;
5799de0b 1986 struct tcp_iter_state *st = seq->private;
1da177e4 1987
1da177e4
LT
1988 st->state = TCP_SEQ_STATE_LISTENING;
1989 rc = listening_get_idx(seq, &pos);
1990
1991 if (!rc) {
1da177e4
LT
1992 st->state = TCP_SEQ_STATE_ESTABLISHED;
1993 rc = established_get_idx(seq, pos);
1994 }
1995
1996 return rc;
1997}
1998
a8b690f9
TH
1999static void *tcp_seek_last_pos(struct seq_file *seq)
2000{
2001 struct tcp_iter_state *st = seq->private;
2002 int offset = st->offset;
2003 int orig_num = st->num;
2004 void *rc = NULL;
2005
2006 switch (st->state) {
a8b690f9
TH
2007 case TCP_SEQ_STATE_LISTENING:
2008 if (st->bucket >= INET_LHTABLE_SIZE)
2009 break;
2010 st->state = TCP_SEQ_STATE_LISTENING;
2011 rc = listening_get_next(seq, NULL);
2012 while (offset-- && rc)
2013 rc = listening_get_next(seq, rc);
2014 if (rc)
2015 break;
2016 st->bucket = 0;
05dbc7b5 2017 st->state = TCP_SEQ_STATE_ESTABLISHED;
a8b690f9
TH
2018 /* Fallthrough */
2019 case TCP_SEQ_STATE_ESTABLISHED:
a8b690f9
TH
2020 if (st->bucket > tcp_hashinfo.ehash_mask)
2021 break;
2022 rc = established_get_first(seq);
2023 while (offset-- && rc)
2024 rc = established_get_next(seq, rc);
2025 }
2026
2027 st->num = orig_num;
2028
2029 return rc;
2030}
2031
1da177e4
LT
2032static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2033{
5799de0b 2034 struct tcp_iter_state *st = seq->private;
a8b690f9
TH
2035 void *rc;
2036
2037 if (*pos && *pos == st->last_pos) {
2038 rc = tcp_seek_last_pos(seq);
2039 if (rc)
2040 goto out;
2041 }
2042
1da177e4
LT
2043 st->state = TCP_SEQ_STATE_LISTENING;
2044 st->num = 0;
a8b690f9
TH
2045 st->bucket = 0;
2046 st->offset = 0;
2047 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2048
2049out:
2050 st->last_pos = *pos;
2051 return rc;
1da177e4
LT
2052}
2053
2054static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2055{
a8b690f9 2056 struct tcp_iter_state *st = seq->private;
1da177e4 2057 void *rc = NULL;
1da177e4
LT
2058
2059 if (v == SEQ_START_TOKEN) {
2060 rc = tcp_get_idx(seq, 0);
2061 goto out;
2062 }
1da177e4
LT
2063
2064 switch (st->state) {
1da177e4
LT
2065 case TCP_SEQ_STATE_LISTENING:
2066 rc = listening_get_next(seq, v);
2067 if (!rc) {
1da177e4 2068 st->state = TCP_SEQ_STATE_ESTABLISHED;
a8b690f9
TH
2069 st->bucket = 0;
2070 st->offset = 0;
1da177e4
LT
2071 rc = established_get_first(seq);
2072 }
2073 break;
2074 case TCP_SEQ_STATE_ESTABLISHED:
1da177e4
LT
2075 rc = established_get_next(seq, v);
2076 break;
2077 }
2078out:
2079 ++*pos;
a8b690f9 2080 st->last_pos = *pos;
1da177e4
LT
2081 return rc;
2082}
2083
2084static void tcp_seq_stop(struct seq_file *seq, void *v)
2085{
5799de0b 2086 struct tcp_iter_state *st = seq->private;
1da177e4
LT
2087
2088 switch (st->state) {
1da177e4
LT
2089 case TCP_SEQ_STATE_LISTENING:
2090 if (v != SEQ_START_TOKEN)
5caea4ea 2091 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
1da177e4 2092 break;
1da177e4
LT
2093 case TCP_SEQ_STATE_ESTABLISHED:
2094 if (v)
9db66bdc 2095 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2096 break;
2097 }
2098}
2099
73cb88ec 2100int tcp_seq_open(struct inode *inode, struct file *file)
1da177e4 2101{
d9dda78b 2102 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
1da177e4 2103 struct tcp_iter_state *s;
52d6f3f1 2104 int err;
1da177e4 2105
52d6f3f1
DL
2106 err = seq_open_net(inode, file, &afinfo->seq_ops,
2107 sizeof(struct tcp_iter_state));
2108 if (err < 0)
2109 return err;
f40c8174 2110
52d6f3f1 2111 s = ((struct seq_file *)file->private_data)->private;
1da177e4 2112 s->family = afinfo->family;
688d1945 2113 s->last_pos = 0;
f40c8174
DL
2114 return 0;
2115}
73cb88ec 2116EXPORT_SYMBOL(tcp_seq_open);
f40c8174 2117
6f8b13bc 2118int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4
LT
2119{
2120 int rc = 0;
2121 struct proc_dir_entry *p;
2122
9427c4b3
DL
2123 afinfo->seq_ops.start = tcp_seq_start;
2124 afinfo->seq_ops.next = tcp_seq_next;
2125 afinfo->seq_ops.stop = tcp_seq_stop;
2126
84841c3c 2127 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
73cb88ec 2128 afinfo->seq_fops, afinfo);
84841c3c 2129 if (!p)
1da177e4
LT
2130 rc = -ENOMEM;
2131 return rc;
2132}
4bc2f18b 2133EXPORT_SYMBOL(tcp_proc_register);
1da177e4 2134
6f8b13bc 2135void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4 2136{
ece31ffd 2137 remove_proc_entry(afinfo->name, net->proc_net);
1da177e4 2138}
4bc2f18b 2139EXPORT_SYMBOL(tcp_proc_unregister);
1da177e4 2140
d4f06873 2141static void get_openreq4(const struct request_sock *req,
aa3a0c8c 2142 struct seq_file *f, int i)
1da177e4 2143{
2e6599cb 2144 const struct inet_request_sock *ireq = inet_rsk(req);
fa76ce73 2145 long delta = req->rsk_timer.expires - jiffies;
1da177e4 2146
5e659e4c 2147 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
652586df 2148 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
1da177e4 2149 i,
634fb979 2150 ireq->ir_loc_addr,
d4f06873 2151 ireq->ir_num,
634fb979
ED
2152 ireq->ir_rmt_addr,
2153 ntohs(ireq->ir_rmt_port),
1da177e4
LT
2154 TCP_SYN_RECV,
2155 0, 0, /* could print option size, but that is af dependent. */
2156 1, /* timers active (only the expire timer) */
a399a805 2157 jiffies_delta_to_clock_t(delta),
e6c022a4 2158 req->num_timeout,
aa3a0c8c
ED
2159 from_kuid_munged(seq_user_ns(f),
2160 sock_i_uid(req->rsk_listener)),
1da177e4
LT
2161 0, /* non standard timer */
2162 0, /* open_requests have no inode */
d4f06873 2163 0,
652586df 2164 req);
1da177e4
LT
2165}
2166
652586df 2167static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
1da177e4
LT
2168{
2169 int timer_active;
2170 unsigned long timer_expires;
cf533ea5 2171 const struct tcp_sock *tp = tcp_sk(sk);
cf4c6bf8 2172 const struct inet_connection_sock *icsk = inet_csk(sk);
cf533ea5 2173 const struct inet_sock *inet = inet_sk(sk);
0536fcc0 2174 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
c720c7e8
ED
2175 __be32 dest = inet->inet_daddr;
2176 __be32 src = inet->inet_rcv_saddr;
2177 __u16 destp = ntohs(inet->inet_dport);
2178 __u16 srcp = ntohs(inet->inet_sport);
49d09007 2179 int rx_queue;
00fd38d9 2180 int state;
1da177e4 2181
6ba8a3b1
ND
2182 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2183 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2184 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1da177e4 2185 timer_active = 1;
463c84b9
ACM
2186 timer_expires = icsk->icsk_timeout;
2187 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2188 timer_active = 4;
463c84b9 2189 timer_expires = icsk->icsk_timeout;
cf4c6bf8 2190 } else if (timer_pending(&sk->sk_timer)) {
1da177e4 2191 timer_active = 2;
cf4c6bf8 2192 timer_expires = sk->sk_timer.expires;
1da177e4
LT
2193 } else {
2194 timer_active = 0;
2195 timer_expires = jiffies;
2196 }
2197
00fd38d9
ED
2198 state = sk_state_load(sk);
2199 if (state == TCP_LISTEN)
49d09007
ED
2200 rx_queue = sk->sk_ack_backlog;
2201 else
00fd38d9
ED
2202 /* Because we don't lock the socket,
2203 * we might find a transient negative value.
49d09007
ED
2204 */
2205 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2206
5e659e4c 2207 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
652586df 2208 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
00fd38d9 2209 i, src, srcp, dest, destp, state,
47da8ee6 2210 tp->write_seq - tp->snd_una,
49d09007 2211 rx_queue,
1da177e4 2212 timer_active,
a399a805 2213 jiffies_delta_to_clock_t(timer_expires - jiffies),
463c84b9 2214 icsk->icsk_retransmits,
a7cb5a49 2215 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
6687e988 2216 icsk->icsk_probes_out,
cf4c6bf8
IJ
2217 sock_i_ino(sk),
2218 atomic_read(&sk->sk_refcnt), sk,
7be87351
SH
2219 jiffies_to_clock_t(icsk->icsk_rto),
2220 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2221 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1da177e4 2222 tp->snd_cwnd,
00fd38d9
ED
2223 state == TCP_LISTEN ?
2224 fastopenq->max_qlen :
652586df 2225 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
1da177e4
LT
2226}
2227
cf533ea5 2228static void get_timewait4_sock(const struct inet_timewait_sock *tw,
652586df 2229 struct seq_file *f, int i)
1da177e4 2230{
789f558c 2231 long delta = tw->tw_timer.expires - jiffies;
23f33c2d 2232 __be32 dest, src;
1da177e4 2233 __u16 destp, srcp;
1da177e4
LT
2234
2235 dest = tw->tw_daddr;
2236 src = tw->tw_rcv_saddr;
2237 destp = ntohs(tw->tw_dport);
2238 srcp = ntohs(tw->tw_sport);
2239
5e659e4c 2240 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
652586df 2241 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
1da177e4 2242 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
a399a805 2243 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
652586df 2244 atomic_read(&tw->tw_refcnt), tw);
1da177e4
LT
2245}
2246
2247#define TMPSZ 150
2248
2249static int tcp4_seq_show(struct seq_file *seq, void *v)
2250{
5799de0b 2251 struct tcp_iter_state *st;
05dbc7b5 2252 struct sock *sk = v;
1da177e4 2253
652586df 2254 seq_setwidth(seq, TMPSZ - 1);
1da177e4 2255 if (v == SEQ_START_TOKEN) {
652586df 2256 seq_puts(seq, " sl local_address rem_address st tx_queue "
1da177e4
LT
2257 "rx_queue tr tm->when retrnsmt uid timeout "
2258 "inode");
2259 goto out;
2260 }
2261 st = seq->private;
2262
079096f1
ED
2263 if (sk->sk_state == TCP_TIME_WAIT)
2264 get_timewait4_sock(v, seq, st->num);
2265 else if (sk->sk_state == TCP_NEW_SYN_RECV)
aa3a0c8c 2266 get_openreq4(v, seq, st->num);
079096f1
ED
2267 else
2268 get_tcp4_sock(v, seq, st->num);
1da177e4 2269out:
652586df 2270 seq_pad(seq, '\n');
1da177e4
LT
2271 return 0;
2272}
2273
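tcp4_seq_show() above is what produces /proc/net/tcp: one header line, then one fixed-width record per listener, established or timewait socket, or pending request. A hedged userspace sketch (read-only; it does not parse the individual hex fields) that dumps the first few records:

#include <stdio.h>

int main(void)
{
	char line[256];
	int n = 0;
	FILE *f = fopen("/proc/net/tcp", "r");

	if (!f) {
		perror("/proc/net/tcp");
		return 1;
	}
	/* The first line is the column header emitted for SEQ_START_TOKEN. */
	while (fgets(line, sizeof(line), f) && n++ < 5)
		fputs(line, stdout);
	fclose(f);
	return 0;
}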
73cb88ec
AV
2274static const struct file_operations tcp_afinfo_seq_fops = {
2275 .owner = THIS_MODULE,
2276 .open = tcp_seq_open,
2277 .read = seq_read,
2278 .llseek = seq_lseek,
2279 .release = seq_release_net
2280};
2281
1da177e4 2282static struct tcp_seq_afinfo tcp4_seq_afinfo = {
1da177e4
LT
2283 .name = "tcp",
2284 .family = AF_INET,
73cb88ec 2285 .seq_fops = &tcp_afinfo_seq_fops,
9427c4b3
DL
2286 .seq_ops = {
2287 .show = tcp4_seq_show,
2288 },
1da177e4
LT
2289};
2290
2c8c1e72 2291static int __net_init tcp4_proc_init_net(struct net *net)
757764f6
PE
2292{
2293 return tcp_proc_register(net, &tcp4_seq_afinfo);
2294}
2295
2c8c1e72 2296static void __net_exit tcp4_proc_exit_net(struct net *net)
757764f6
PE
2297{
2298 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2299}
2300
2301static struct pernet_operations tcp4_net_ops = {
2302 .init = tcp4_proc_init_net,
2303 .exit = tcp4_proc_exit_net,
2304};
2305
1da177e4
LT
2306int __init tcp4_proc_init(void)
2307{
757764f6 2308 return register_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2309}
2310
2311void tcp4_proc_exit(void)
2312{
757764f6 2313 unregister_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2314}
2315#endif /* CONFIG_PROC_FS */
2316
2317struct proto tcp_prot = {
2318 .name = "TCP",
2319 .owner = THIS_MODULE,
2320 .close = tcp_close,
2321 .connect = tcp_v4_connect,
2322 .disconnect = tcp_disconnect,
463c84b9 2323 .accept = inet_csk_accept,
1da177e4
LT
2324 .ioctl = tcp_ioctl,
2325 .init = tcp_v4_init_sock,
2326 .destroy = tcp_v4_destroy_sock,
2327 .shutdown = tcp_shutdown,
2328 .setsockopt = tcp_setsockopt,
2329 .getsockopt = tcp_getsockopt,
1da177e4 2330 .recvmsg = tcp_recvmsg,
7ba42910
CG
2331 .sendmsg = tcp_sendmsg,
2332 .sendpage = tcp_sendpage,
1da177e4 2333 .backlog_rcv = tcp_v4_do_rcv,
46d3ceab 2334 .release_cb = tcp_release_cb,
ab1e0a13
ACM
2335 .hash = inet_hash,
2336 .unhash = inet_unhash,
2337 .get_port = inet_csk_get_port,
1da177e4 2338 .enter_memory_pressure = tcp_enter_memory_pressure,
c9bee3b7 2339 .stream_memory_free = tcp_stream_memory_free,
1da177e4 2340 .sockets_allocated = &tcp_sockets_allocated,
0a5578cf 2341 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2342 .memory_allocated = &tcp_memory_allocated,
2343 .memory_pressure = &tcp_memory_pressure,
a4fe34bf 2344 .sysctl_mem = sysctl_tcp_mem,
1da177e4
LT
2345 .sysctl_wmem = sysctl_tcp_wmem,
2346 .sysctl_rmem = sysctl_tcp_rmem,
2347 .max_header = MAX_TCP_HEADER,
2348 .obj_size = sizeof(struct tcp_sock),
3ab5aee7 2349 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2350 .twsk_prot = &tcp_timewait_sock_ops,
60236fdd 2351 .rsk_prot = &tcp_request_sock_ops,
39d8cda7 2352 .h.hashinfo = &tcp_hashinfo,
7ba42910 2353 .no_autobind = true,
543d9cfe
ACM
2354#ifdef CONFIG_COMPAT
2355 .compat_setsockopt = compat_tcp_setsockopt,
2356 .compat_getsockopt = compat_tcp_getsockopt,
d1a4c0b3 2357#endif
c1e64e29 2358 .diag_destroy = tcp_abort,
1da177e4 2359};
4bc2f18b 2360EXPORT_SYMBOL(tcp_prot);
1da177e4 2361
bdbbb852
ED
2362static void __net_exit tcp_sk_exit(struct net *net)
2363{
2364 int cpu;
2365
2366 for_each_possible_cpu(cpu)
2367 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2368 free_percpu(net->ipv4.tcp_sk);
2369}
2370
046ee902
DL
2371static int __net_init tcp_sk_init(struct net *net)
2372{
bdbbb852
ED
2373 int res, cpu;
2374
2375 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2376 if (!net->ipv4.tcp_sk)
2377 return -ENOMEM;
2378
2379 for_each_possible_cpu(cpu) {
2380 struct sock *sk;
2381
2382 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2383 IPPROTO_TCP, net);
2384 if (res)
2385 goto fail;
2386 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2387 }
49213555 2388
5d134f1c 2389 net->ipv4.sysctl_tcp_ecn = 2;
49213555
DB
2390 net->ipv4.sysctl_tcp_ecn_fallback = 1;
2391
b0f9ca53 2392 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
6b58e0a5 2393 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
05cbc0db 2394 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
046ee902 2395
13b287e8 2396 net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
9bd6861b 2397 net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
b840d15d 2398 net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
13b287e8 2399
6fa25166 2400 net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
7c083ecb 2401 net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
0aca737d 2402 net->ipv4.sysctl_tcp_syncookies = 1;
1043e25f 2403 net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
ae5c3f40 2404 net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
c6214a97 2405 net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
c402d9be 2406 net->ipv4.sysctl_tcp_orphan_retries = 0;
1e579caa 2407 net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
4979f2d9 2408 net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
12ed8244 2409
49213555 2410 return 0;
bdbbb852
ED
2411fail:
2412 tcp_sk_exit(net);
2413
2414 return res;
b099ce26
EB
2415}
2416
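The per-netns defaults initialised in tcp_sk_init() above surface as sysctls under /proc/sys/net/ipv4/ in each network namespace. A hedged userspace sketch reading one of them (tcp_syn_retries, seeded with TCP_SYN_RETRIES above):

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_syn_retries", "r");

	if (!f) {
		perror("tcp_syn_retries");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("net.ipv4.tcp_syn_retries = %s", buf);
	fclose(f);
	return 0;
}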
2417static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2418{
2419 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
046ee902
DL
2420}
2421
2422static struct pernet_operations __net_initdata tcp_sk_ops = {
b099ce26
EB
2423 .init = tcp_sk_init,
2424 .exit = tcp_sk_exit,
2425 .exit_batch = tcp_sk_exit_batch,
046ee902
DL
2426};
2427
9b0f976f 2428void __init tcp_v4_init(void)
1da177e4 2429{
5caea4ea 2430 inet_hashinfo_init(&tcp_hashinfo);
6a1b3054 2431 if (register_pernet_subsys(&tcp_sk_ops))
1da177e4 2432 panic("Failed to create the TCP control socket.\n");
1da177e4 2433}