/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *	See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller	:	New socket lookup architecture.
 *				This code is dedicated to John Dyson.
 *	David S. Miller :	Change semantics of established hash,
 *				half is devoted to TIME_WAIT sockets
 *				and the rest go in the other half.
 *	Andi Kleen :		Add support for syncookies and fixed
 *				some bugs: ip options weren't passed to
 *				the TCP layer, missed a check for an
 *				ACK bit.
 *	Andi Kleen :		Implemented fast path mtu discovery.
 *				Fixed many serious bugs in the
 *				request_sock handling and moved
 *				most of it into the af independent code.
 *				Added tail drop and some other bugfixes.
 *				Added new listen semantics.
 *	Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:	ip_dynaddr bits
 *	Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov :	Transparent proxy revived after year
 *				coma.
 *	Andi Kleen	:	Fix new listen.
 *	Andi Kleen	:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *				a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

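/* (annotation, not part of the upstream file) tcp_v4_init_sequence() derives
 * the initial sequence number for a new connection from the segment's
 * address/port 4-tuple via secure_tcp_sequence_number(), so ISNs are hard to
 * predict for an off-path attacker.
 */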
static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs send out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even this two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;


		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;

	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct net *net,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sock_net(sk), skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
						 tcp_sk(sk)->snd_nxt;

	tcp_v4_send_ack(sock_net(sk), skb, seq,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

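/* (annotation, not part of the upstream file) The MD5 code below is reached
 * from userspace through the TCP_MD5SIG socket option; a minimal sketch,
 * assuming an IPv4 socket "fd" and a peer address "peer" already set up:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * That call lands in tcp_v4_parse_md5_keys() further down, which adds or
 * deletes the per-destination key used by the RFC 2385 segment signing
 * implemented in this section.
 */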
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}

static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and its wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}

static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

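/* (annotation, not part of the upstream file) The two ops tables below plug
 * the IPv4-specific helpers defined above into the address-family independent
 * request_sock handling in tcp_input.c/tcp_output.c; the IPv6 code installs
 * its own tables with the same layout.
 */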
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs send to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req)
		tcp_move_syn(newtp, req);

	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

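/* (annotation, not part of the upstream file) When the listener's SYN queue
 * overflowed and a SYN cookie was sent instead of creating a request_sock,
 * the later ACK arrives with no matching request; cookie_v4_check() below
 * reconstructs the connection state from the cookie encoded in that ACK.
 */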
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}

/* The socket must have it's spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

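/* (annotation, not part of the upstream file) Early demux runs from the IP
 * receive path before routing: if an established socket matches the segment's
 * 4-tuple, it is attached to the skb and its cached rx dst is reused, which
 * avoids a full route lookup in the common case.
 */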
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force_safe(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
	    tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));
		__NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
				skb_queue_len(&tp->ucopy.prequeue));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb1);

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);

/*
 *	From tcp_input.c
 */

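/* (annotation, not part of the upstream file) tcp_v4_rcv() is the protocol
 * handler called for every TCP segment delivered by IPv4; it validates the
 * header and checksum, looks up the owning socket and either processes the
 * segment directly, queues it to the prequeue/backlog, or generates a reset.
 */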
1541int tcp_v4_rcv(struct sk_buff *skb)
1542{
3b24d854 1543 struct net *net = dev_net(skb->dev);
eddc9ec5 1544 const struct iphdr *iph;
cf533ea5 1545 const struct tcphdr *th;
3b24d854 1546 bool refcounted;
1da177e4
LT
1547 struct sock *sk;
1548 int ret;
1549
1550 if (skb->pkt_type != PACKET_HOST)
1551 goto discard_it;
1552
1553 /* Count it even if it's bad */
90bbcc60 1554 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1da177e4
LT
1555
1556 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1557 goto discard_it;
1558
ea1627c2 1559 th = (const struct tcphdr *)skb->data;
1da177e4 1560
ea1627c2 1561 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1da177e4
LT
1562 goto bad_packet;
1563 if (!pskb_may_pull(skb, th->doff * 4))
1564 goto discard_it;
1565
1566 /* An explanation is required here, I think.
1567 * Packet length and doff are validated by header prediction,
caa20d9a 1568 * provided case of th->doff==0 is eliminated.
1da177e4 1569 * So, we defer the checks. */
ed70fcfc
TH
1570
1571 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
6a5dc9e5 1572 goto csum_error;
1da177e4 1573
ea1627c2 1574 th = (const struct tcphdr *)skb->data;
eddc9ec5 1575 iph = ip_hdr(skb);
971f10ec
ED
1576 /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
1577 * barrier() makes sure compiler wont play fool^Waliasing games.
1578 */
1579 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1580 sizeof(struct inet_skb_parm));
1581 barrier();
1582
1da177e4
LT
1583 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1584 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1585 skb->len - th->doff * 4);
1586 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
e11ecddf 1587 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
04317daf 1588 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
b82d1bb4 1589 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1da177e4
LT
1590 TCP_SKB_CB(skb)->sacked = 0;
1591
4bdc3d66 1592lookup:
a583636a 1593 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
3b24d854 1594 th->dest, &refcounted);
1da177e4
LT
1595 if (!sk)
1596 goto no_tcp_socket;
1597
bb134d5d
ED
1598process:
1599 if (sk->sk_state == TCP_TIME_WAIT)
1600 goto do_time_wait;
1601
079096f1
ED
1602 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1603 struct request_sock *req = inet_reqsk(sk);
7716682c 1604 struct sock *nsk;
079096f1
ED
1605
1606 sk = req->rsk_listener;
72923555
ED
1607 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1608 reqsk_put(req);
1609 goto discard_it;
1610 }
7716682c 1611 if (unlikely(sk->sk_state != TCP_LISTEN)) {
f03f2e15 1612 inet_csk_reqsk_queue_drop_and_put(sk, req);
4bdc3d66
ED
1613 goto lookup;
1614 }
3b24d854
ED
1615 /* We own a reference on the listener, increase it again
1616 * as we might lose it too soon.
1617 */
7716682c 1618 sock_hold(sk);
3b24d854 1619 refcounted = true;
7716682c 1620 nsk = tcp_check_req(sk, skb, req, false);
079096f1
ED
1621 if (!nsk) {
1622 reqsk_put(req);
7716682c 1623 goto discard_and_relse;
079096f1
ED
1624 }
1625 if (nsk == sk) {
079096f1
ED
1626 reqsk_put(req);
1627 } else if (tcp_child_process(sk, nsk, skb)) {
1628 tcp_v4_send_reset(nsk, skb);
7716682c 1629 goto discard_and_relse;
079096f1 1630 } else {
7716682c 1631 sock_put(sk);
079096f1
ED
1632 return 0;
1633 }
1634 }
6cce09f8 1635 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
02a1d6e7 1636 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
d218d111 1637 goto discard_and_relse;
6cce09f8 1638 }
d218d111 1639
1da177e4
LT
1640 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1641 goto discard_and_relse;
9ea88a15 1642
9ea88a15
DP
1643 if (tcp_v4_inbound_md5_hash(sk, skb))
1644 goto discard_and_relse;
9ea88a15 1645
b59c2701 1646 nf_reset(skb);
1da177e4 1647
fda9ef5d 1648 if (sk_filter(sk, skb))
1da177e4
LT
1649 goto discard_and_relse;
1650
1651 skb->dev = NULL;
1652
e994b2f0
ED
1653 if (sk->sk_state == TCP_LISTEN) {
1654 ret = tcp_v4_do_rcv(sk, skb);
1655 goto put_and_return;
1656 }
1657
1658 sk_incoming_cpu_update(sk);
1659
c6366184 1660 bh_lock_sock_nested(sk);
a44d6eac 1661 tcp_segs_in(tcp_sk(sk), skb);
1da177e4
LT
1662 ret = 0;
1663 if (!sock_owned_by_user(sk)) {
7bced397 1664 if (!tcp_prequeue(sk, skb))
1da177e4 1665 ret = tcp_v4_do_rcv(sk, skb);
da882c1f
ED
1666 } else if (unlikely(sk_add_backlog(sk, skb,
1667 sk->sk_rcvbuf + sk->sk_sndbuf))) {
6b03a53a 1668 bh_unlock_sock(sk);
02a1d6e7 1669 __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
6b03a53a
ZY
1670 goto discard_and_relse;
1671 }
1da177e4
LT
1672 bh_unlock_sock(sk);
1673
e994b2f0 1674put_and_return:
3b24d854
ED
1675 if (refcounted)
1676 sock_put(sk);
1da177e4
LT
1677
1678 return ret;
1679
1680no_tcp_socket:
1681 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1682 goto discard_it;
1683
12e25e10 1684 if (tcp_checksum_complete(skb)) {
6a5dc9e5 1685csum_error:
90bbcc60 1686 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1da177e4 1687bad_packet:
90bbcc60 1688 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1da177e4 1689 } else {
cfb6eeb4 1690 tcp_v4_send_reset(NULL, skb);
1da177e4
LT
1691 }
1692
1693discard_it:
1694 /* Discard frame. */
1695 kfree_skb(skb);
e905a9ed 1696 return 0;
1da177e4
LT
1697
1698discard_and_relse:
532182cd 1699 sk_drops_add(sk, skb);
3b24d854
ED
1700 if (refcounted)
1701 sock_put(sk);
1da177e4
LT
1702 goto discard_it;
1703
1704do_time_wait:
1705 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1706 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1707 goto discard_it;
1708 }
1709
6a5dc9e5
ED
1710 if (tcp_checksum_complete(skb)) {
1711 inet_twsk_put(inet_twsk(sk));
1712 goto csum_error;
1da177e4 1713 }
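	/* tcp_timewait_state_process() decides whether this segment revives
	 * the connection through a current listener (TCP_TW_SYN), deserves
	 * an ACK or a RST, or should simply be dropped.
	 */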
9469c7b4 1714 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4 1715 case TCP_TW_SYN: {
c346dca1 1716 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
a583636a
CG
1717 &tcp_hashinfo, skb,
1718 __tcp_hdrlen(th),
da5e3630 1719 iph->saddr, th->source,
eddc9ec5 1720 iph->daddr, th->dest,
463c84b9 1721 inet_iif(skb));
1da177e4 1722 if (sk2) {
dbe7faa4 1723 inet_twsk_deschedule_put(inet_twsk(sk));
1da177e4 1724 sk = sk2;
3b24d854 1725 refcounted = false;
1da177e4
LT
1726 goto process;
1727 }
1728 /* Fall through to ACK */
1729 }
1730 case TCP_TW_ACK:
1731 tcp_v4_timewait_ack(sk, skb);
1732 break;
1733 case TCP_TW_RST:
271c3b9b
FW
1734 tcp_v4_send_reset(sk, skb);
1735 inet_twsk_deschedule_put(inet_twsk(sk));
1736 goto discard_it;
1da177e4
LT
1737 case TCP_TW_SUCCESS:;
1738 }
1739 goto discard_it;
1740}
1741
ccb7c410
DM
1742static struct timewait_sock_ops tcp_timewait_sock_ops = {
1743 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1744 .twsk_unique = tcp_twsk_unique,
1745 .twsk_destructor= tcp_twsk_destructor,
ccb7c410 1746};
1da177e4 1747
63d02d15 1748void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
5d299f3d
ED
1749{
1750 struct dst_entry *dst = skb_dst(skb);
1751
5037e9ef 1752 if (dst && dst_hold_safe(dst)) {
ca777eff
ED
1753 sk->sk_rx_dst = dst;
1754 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1755 }
5d299f3d 1756}
63d02d15 1757EXPORT_SYMBOL(inet_sk_rx_dst_set);
5d299f3d 1758
3b401a81 1759const struct inet_connection_sock_af_ops ipv4_specific = {
543d9cfe
ACM
1760 .queue_xmit = ip_queue_xmit,
1761 .send_check = tcp_v4_send_check,
1762 .rebuild_header = inet_sk_rebuild_header,
5d299f3d 1763 .sk_rx_dst_set = inet_sk_rx_dst_set,
543d9cfe
ACM
1764 .conn_request = tcp_v4_conn_request,
1765 .syn_recv_sock = tcp_v4_syn_recv_sock,
543d9cfe
ACM
1766 .net_header_len = sizeof(struct iphdr),
1767 .setsockopt = ip_setsockopt,
1768 .getsockopt = ip_getsockopt,
1769 .addr2sockaddr = inet_csk_addr2sockaddr,
1770 .sockaddr_len = sizeof(struct sockaddr_in),
ab1e0a13 1771 .bind_conflict = inet_csk_bind_conflict,
3fdadf7d 1772#ifdef CONFIG_COMPAT
543d9cfe
ACM
1773 .compat_setsockopt = compat_ip_setsockopt,
1774 .compat_getsockopt = compat_ip_getsockopt,
3fdadf7d 1775#endif
4fab9071 1776 .mtu_reduced = tcp_v4_mtu_reduced,
1da177e4 1777};
4bc2f18b 1778EXPORT_SYMBOL(ipv4_specific);
1da177e4 1779
cfb6eeb4 1780#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1781static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
cfb6eeb4 1782 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1783 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4 1784 .md5_parse = tcp_v4_parse_md5_keys,
cfb6eeb4 1785};
b6332e6c 1786#endif
cfb6eeb4 1787
1da177e4
LT
 1788/* NOTE: A lot of things are set to zero explicitly by the call to
 1789 * sk_alloc(), so they need not be done here.
1790 */
1791static int tcp_v4_init_sock(struct sock *sk)
1792{
6687e988 1793 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 1794
900f65d3 1795 tcp_init_sock(sk);
1da177e4 1796
8292a17a 1797 icsk->icsk_af_ops = &ipv4_specific;
900f65d3 1798
cfb6eeb4 1799#ifdef CONFIG_TCP_MD5SIG
ac807fa8 1800 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
cfb6eeb4 1801#endif
1da177e4 1802
1da177e4
LT
1803 return 0;
1804}
1805
7d06b2e0 1806void tcp_v4_destroy_sock(struct sock *sk)
1da177e4
LT
1807{
1808 struct tcp_sock *tp = tcp_sk(sk);
1809
1810 tcp_clear_xmit_timers(sk);
1811
6687e988 1812 tcp_cleanup_congestion_control(sk);
317a76f9 1813
1da177e4 1814	/* Clean up the write buffer. */
fe067e8a 1815 tcp_write_queue_purge(sk);
1da177e4
LT
1816
1817 /* Cleans up our, hopefully empty, out_of_order_queue. */
e905a9ed 1818 __skb_queue_purge(&tp->out_of_order_queue);
1da177e4 1819
cfb6eeb4
YH
1820#ifdef CONFIG_TCP_MD5SIG
1821 /* Clean up the MD5 key list, if any */
1822 if (tp->md5sig_info) {
a915da9b 1823 tcp_clear_md5_list(sk);
a8afca03 1824 kfree_rcu(tp->md5sig_info, rcu);
cfb6eeb4
YH
1825 tp->md5sig_info = NULL;
1826 }
1827#endif
1a2449a8 1828
1da177e4
LT
 1829	/* Clean up the prequeue; it really should be empty by now. */
1830 __skb_queue_purge(&tp->ucopy.prequeue);
1831
1832 /* Clean up a referenced TCP bind bucket. */
463c84b9 1833 if (inet_csk(sk)->icsk_bind_hash)
ab1e0a13 1834 inet_put_port(sk);
1da177e4 1835
00db4124 1836 BUG_ON(tp->fastopen_rsk);
435cf559 1837
cf60af03
YC
1838 /* If socket is aborted during connect operation */
1839 tcp_free_fastopen_req(tp);
cd8ae852 1840 tcp_saved_syn_free(tp);
cf60af03 1841
777c6ae5 1842 local_bh_disable();
180d8cd9 1843 sk_sockets_allocated_dec(sk);
777c6ae5 1844 local_bh_enable();
3d596f7b 1845
baac50bb 1846 if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3d596f7b 1847 sock_release_memcg(sk);
1da177e4 1848}
1da177e4
LT
1849EXPORT_SYMBOL(tcp_v4_destroy_sock);
1850
1851#ifdef CONFIG_PROC_FS
1852/* Proc filesystem TCP sock list dumping. */
1853
a8b690f9
TH
1854/*
 1855 * Get the next listener socket following cur. If cur is NULL, get the first
 1856 * socket starting from the bucket given in st->bucket; when st->bucket is
 1857 * zero, the very first socket in the hash table is returned.
1858 */
1da177e4
LT
1859static void *listening_get_next(struct seq_file *seq, void *cur)
1860{
5799de0b 1861 struct tcp_iter_state *st = seq->private;
a4146b1b 1862 struct net *net = seq_file_net(seq);
3b24d854
ED
1863 struct inet_listen_hashbucket *ilb;
1864 struct inet_connection_sock *icsk;
1865 struct sock *sk = cur;
1da177e4
LT
1866
1867 if (!sk) {
3b24d854 1868get_head:
a8b690f9 1869 ilb = &tcp_hashinfo.listening_hash[st->bucket];
5caea4ea 1870 spin_lock_bh(&ilb->lock);
3b24d854 1871 sk = sk_head(&ilb->head);
a8b690f9 1872 st->offset = 0;
1da177e4
LT
1873 goto get_sk;
1874 }
5caea4ea 1875 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1da177e4 1876 ++st->num;
a8b690f9 1877 ++st->offset;
1da177e4 1878
3b24d854 1879 sk = sk_next(sk);
1da177e4 1880get_sk:
3b24d854 1881 sk_for_each_from(sk) {
8475ef9f
PE
1882 if (!net_eq(sock_net(sk), net))
1883 continue;
3b24d854
ED
1884 if (sk->sk_family == st->family)
1885 return sk;
e905a9ed 1886 icsk = inet_csk(sk);
1da177e4 1887 }
5caea4ea 1888 spin_unlock_bh(&ilb->lock);
a8b690f9 1889 st->offset = 0;
3b24d854
ED
1890 if (++st->bucket < INET_LHTABLE_SIZE)
1891 goto get_head;
1892 return NULL;
1da177e4
LT
1893}
1894
1895static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1896{
a8b690f9
TH
1897 struct tcp_iter_state *st = seq->private;
1898 void *rc;
1899
1900 st->bucket = 0;
1901 st->offset = 0;
1902 rc = listening_get_next(seq, NULL);
1da177e4
LT
1903
1904 while (rc && *pos) {
1905 rc = listening_get_next(seq, rc);
1906 --*pos;
1907 }
1908 return rc;
1909}
1910
05dbc7b5 1911static inline bool empty_bucket(const struct tcp_iter_state *st)
6eac5604 1912{
05dbc7b5 1913 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
6eac5604
AK
1914}
1915
a8b690f9
TH
1916/*
1917 * Get first established socket starting from bucket given in st->bucket.
1918 * If st->bucket is zero, the very first socket in the hash is returned.
1919 */
1da177e4
LT
1920static void *established_get_first(struct seq_file *seq)
1921{
5799de0b 1922 struct tcp_iter_state *st = seq->private;
a4146b1b 1923 struct net *net = seq_file_net(seq);
1da177e4
LT
1924 void *rc = NULL;
1925
a8b690f9
TH
1926 st->offset = 0;
1927 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1da177e4 1928 struct sock *sk;
3ab5aee7 1929 struct hlist_nulls_node *node;
9db66bdc 1930 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1da177e4 1931
6eac5604
AK
1932 /* Lockless fast path for the common case of empty buckets */
1933 if (empty_bucket(st))
1934 continue;
1935
9db66bdc 1936 spin_lock_bh(lock);
3ab5aee7 1937 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
f40c8174 1938 if (sk->sk_family != st->family ||
878628fb 1939 !net_eq(sock_net(sk), net)) {
1da177e4
LT
1940 continue;
1941 }
1942 rc = sk;
1943 goto out;
1944 }
9db66bdc 1945 spin_unlock_bh(lock);
1da177e4
LT
1946 }
1947out:
1948 return rc;
1949}
1950
1951static void *established_get_next(struct seq_file *seq, void *cur)
1952{
1953 struct sock *sk = cur;
3ab5aee7 1954 struct hlist_nulls_node *node;
5799de0b 1955 struct tcp_iter_state *st = seq->private;
a4146b1b 1956 struct net *net = seq_file_net(seq);
1da177e4
LT
1957
1958 ++st->num;
a8b690f9 1959 ++st->offset;
1da177e4 1960
05dbc7b5 1961 sk = sk_nulls_next(sk);
1da177e4 1962
3ab5aee7 1963 sk_nulls_for_each_from(sk, node) {
878628fb 1964 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
05dbc7b5 1965 return sk;
1da177e4
LT
1966 }
1967
05dbc7b5
ED
1968 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1969 ++st->bucket;
1970 return established_get_first(seq);
1da177e4
LT
1971}
1972
1973static void *established_get_idx(struct seq_file *seq, loff_t pos)
1974{
a8b690f9
TH
1975 struct tcp_iter_state *st = seq->private;
1976 void *rc;
1977
1978 st->bucket = 0;
1979 rc = established_get_first(seq);
1da177e4
LT
1980
1981 while (rc && pos) {
1982 rc = established_get_next(seq, rc);
1983 --pos;
7174259e 1984 }
1da177e4
LT
1985 return rc;
1986}
1987
1988static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1989{
1990 void *rc;
5799de0b 1991 struct tcp_iter_state *st = seq->private;
1da177e4 1992
1da177e4
LT
1993 st->state = TCP_SEQ_STATE_LISTENING;
1994 rc = listening_get_idx(seq, &pos);
1995
1996 if (!rc) {
1da177e4
LT
1997 st->state = TCP_SEQ_STATE_ESTABLISHED;
1998 rc = established_get_idx(seq, pos);
1999 }
2000
2001 return rc;
2002}
2003
a8b690f9
TH
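/* Resume a /proc/net/tcp walk from the bucket and offset saved by the
 * previous read, so large hash tables are not rescanned from the start.
 */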
2004static void *tcp_seek_last_pos(struct seq_file *seq)
2005{
2006 struct tcp_iter_state *st = seq->private;
2007 int offset = st->offset;
2008 int orig_num = st->num;
2009 void *rc = NULL;
2010
2011 switch (st->state) {
a8b690f9
TH
2012 case TCP_SEQ_STATE_LISTENING:
2013 if (st->bucket >= INET_LHTABLE_SIZE)
2014 break;
2015 st->state = TCP_SEQ_STATE_LISTENING;
2016 rc = listening_get_next(seq, NULL);
2017 while (offset-- && rc)
2018 rc = listening_get_next(seq, rc);
2019 if (rc)
2020 break;
2021 st->bucket = 0;
05dbc7b5 2022 st->state = TCP_SEQ_STATE_ESTABLISHED;
a8b690f9
TH
2023 /* Fallthrough */
2024 case TCP_SEQ_STATE_ESTABLISHED:
a8b690f9
TH
2025 if (st->bucket > tcp_hashinfo.ehash_mask)
2026 break;
2027 rc = established_get_first(seq);
2028 while (offset-- && rc)
2029 rc = established_get_next(seq, rc);
2030 }
2031
2032 st->num = orig_num;
2033
2034 return rc;
2035}
2036
1da177e4
LT
2037static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2038{
5799de0b 2039 struct tcp_iter_state *st = seq->private;
a8b690f9
TH
2040 void *rc;
2041
2042 if (*pos && *pos == st->last_pos) {
2043 rc = tcp_seek_last_pos(seq);
2044 if (rc)
2045 goto out;
2046 }
2047
1da177e4
LT
2048 st->state = TCP_SEQ_STATE_LISTENING;
2049 st->num = 0;
a8b690f9
TH
2050 st->bucket = 0;
2051 st->offset = 0;
2052 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2053
2054out:
2055 st->last_pos = *pos;
2056 return rc;
1da177e4
LT
2057}
2058
2059static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2060{
a8b690f9 2061 struct tcp_iter_state *st = seq->private;
1da177e4 2062 void *rc = NULL;
1da177e4
LT
2063
2064 if (v == SEQ_START_TOKEN) {
2065 rc = tcp_get_idx(seq, 0);
2066 goto out;
2067 }
1da177e4
LT
2068
2069 switch (st->state) {
1da177e4
LT
2070 case TCP_SEQ_STATE_LISTENING:
2071 rc = listening_get_next(seq, v);
2072 if (!rc) {
1da177e4 2073 st->state = TCP_SEQ_STATE_ESTABLISHED;
a8b690f9
TH
2074 st->bucket = 0;
2075 st->offset = 0;
1da177e4
LT
2076 rc = established_get_first(seq);
2077 }
2078 break;
2079 case TCP_SEQ_STATE_ESTABLISHED:
1da177e4
LT
2080 rc = established_get_next(seq, v);
2081 break;
2082 }
2083out:
2084 ++*pos;
a8b690f9 2085 st->last_pos = *pos;
1da177e4
LT
2086 return rc;
2087}
2088
2089static void tcp_seq_stop(struct seq_file *seq, void *v)
2090{
5799de0b 2091 struct tcp_iter_state *st = seq->private;
1da177e4
LT
2092
2093 switch (st->state) {
1da177e4
LT
2094 case TCP_SEQ_STATE_LISTENING:
2095 if (v != SEQ_START_TOKEN)
5caea4ea 2096 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
1da177e4 2097 break;
1da177e4
LT
2098 case TCP_SEQ_STATE_ESTABLISHED:
2099 if (v)
9db66bdc 2100 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1da177e4
LT
2101 break;
2102 }
2103}
2104
73cb88ec 2105int tcp_seq_open(struct inode *inode, struct file *file)
1da177e4 2106{
d9dda78b 2107 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
1da177e4 2108 struct tcp_iter_state *s;
52d6f3f1 2109 int err;
1da177e4 2110
52d6f3f1
DL
2111 err = seq_open_net(inode, file, &afinfo->seq_ops,
2112 sizeof(struct tcp_iter_state));
2113 if (err < 0)
2114 return err;
f40c8174 2115
52d6f3f1 2116 s = ((struct seq_file *)file->private_data)->private;
1da177e4 2117 s->family = afinfo->family;
688d1945 2118 s->last_pos = 0;
f40c8174
DL
2119 return 0;
2120}
73cb88ec 2121EXPORT_SYMBOL(tcp_seq_open);
f40c8174 2122
6f8b13bc 2123int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4
LT
2124{
2125 int rc = 0;
2126 struct proc_dir_entry *p;
2127
9427c4b3
DL
2128 afinfo->seq_ops.start = tcp_seq_start;
2129 afinfo->seq_ops.next = tcp_seq_next;
2130 afinfo->seq_ops.stop = tcp_seq_stop;
2131
84841c3c 2132 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
73cb88ec 2133 afinfo->seq_fops, afinfo);
84841c3c 2134 if (!p)
1da177e4
LT
2135 rc = -ENOMEM;
2136 return rc;
2137}
4bc2f18b 2138EXPORT_SYMBOL(tcp_proc_register);
1da177e4 2139
6f8b13bc 2140void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
1da177e4 2141{
ece31ffd 2142 remove_proc_entry(afinfo->name, net->proc_net);
1da177e4 2143}
4bc2f18b 2144EXPORT_SYMBOL(tcp_proc_unregister);
1da177e4 2145
d4f06873 2146static void get_openreq4(const struct request_sock *req,
aa3a0c8c 2147 struct seq_file *f, int i)
1da177e4 2148{
2e6599cb 2149 const struct inet_request_sock *ireq = inet_rsk(req);
fa76ce73 2150 long delta = req->rsk_timer.expires - jiffies;
1da177e4 2151
5e659e4c 2152 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
652586df 2153 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
1da177e4 2154 i,
634fb979 2155 ireq->ir_loc_addr,
d4f06873 2156 ireq->ir_num,
634fb979
ED
2157 ireq->ir_rmt_addr,
2158 ntohs(ireq->ir_rmt_port),
1da177e4
LT
2159 TCP_SYN_RECV,
2160 0, 0, /* could print option size, but that is af dependent. */
2161 1, /* timers active (only the expire timer) */
a399a805 2162 jiffies_delta_to_clock_t(delta),
e6c022a4 2163 req->num_timeout,
aa3a0c8c
ED
2164 from_kuid_munged(seq_user_ns(f),
2165 sock_i_uid(req->rsk_listener)),
1da177e4
LT
2166 0, /* non standard timer */
2167 0, /* open_requests have no inode */
d4f06873 2168 0,
652586df 2169 req);
1da177e4
LT
2170}
2171
652586df 2172static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
1da177e4
LT
2173{
2174 int timer_active;
2175 unsigned long timer_expires;
cf533ea5 2176 const struct tcp_sock *tp = tcp_sk(sk);
cf4c6bf8 2177 const struct inet_connection_sock *icsk = inet_csk(sk);
cf533ea5 2178 const struct inet_sock *inet = inet_sk(sk);
0536fcc0 2179 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
c720c7e8
ED
2180 __be32 dest = inet->inet_daddr;
2181 __be32 src = inet->inet_rcv_saddr;
2182 __u16 destp = ntohs(inet->inet_dport);
2183 __u16 srcp = ntohs(inet->inet_sport);
49d09007 2184 int rx_queue;
00fd38d9 2185 int state;
1da177e4 2186
6ba8a3b1
ND
2187 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2188 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2189 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1da177e4 2190 timer_active = 1;
463c84b9
ACM
2191 timer_expires = icsk->icsk_timeout;
2192 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2193 timer_active = 4;
463c84b9 2194 timer_expires = icsk->icsk_timeout;
cf4c6bf8 2195 } else if (timer_pending(&sk->sk_timer)) {
1da177e4 2196 timer_active = 2;
cf4c6bf8 2197 timer_expires = sk->sk_timer.expires;
1da177e4
LT
2198 } else {
2199 timer_active = 0;
2200 timer_expires = jiffies;
2201 }
2202
00fd38d9
ED
2203 state = sk_state_load(sk);
2204 if (state == TCP_LISTEN)
49d09007
ED
2205 rx_queue = sk->sk_ack_backlog;
2206 else
00fd38d9
ED
2207 /* Because we don't lock the socket,
2208 * we might find a transient negative value.
49d09007
ED
2209 */
2210 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2211
5e659e4c 2212 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
652586df 2213 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
00fd38d9 2214 i, src, srcp, dest, destp, state,
47da8ee6 2215 tp->write_seq - tp->snd_una,
49d09007 2216 rx_queue,
1da177e4 2217 timer_active,
a399a805 2218 jiffies_delta_to_clock_t(timer_expires - jiffies),
463c84b9 2219 icsk->icsk_retransmits,
a7cb5a49 2220 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
6687e988 2221 icsk->icsk_probes_out,
cf4c6bf8
IJ
2222 sock_i_ino(sk),
2223 atomic_read(&sk->sk_refcnt), sk,
7be87351
SH
2224 jiffies_to_clock_t(icsk->icsk_rto),
2225 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2226 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1da177e4 2227 tp->snd_cwnd,
00fd38d9
ED
2228 state == TCP_LISTEN ?
2229 fastopenq->max_qlen :
652586df 2230 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
1da177e4
LT
2231}
2232
cf533ea5 2233static void get_timewait4_sock(const struct inet_timewait_sock *tw,
652586df 2234 struct seq_file *f, int i)
1da177e4 2235{
789f558c 2236 long delta = tw->tw_timer.expires - jiffies;
23f33c2d 2237 __be32 dest, src;
1da177e4 2238 __u16 destp, srcp;
1da177e4
LT
2239
2240 dest = tw->tw_daddr;
2241 src = tw->tw_rcv_saddr;
2242 destp = ntohs(tw->tw_dport);
2243 srcp = ntohs(tw->tw_sport);
2244
5e659e4c 2245 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
652586df 2246 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
1da177e4 2247 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
a399a805 2248 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
652586df 2249 atomic_read(&tw->tw_refcnt), tw);
1da177e4
LT
2250}
2251
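/* Each /proc/net/tcp line is padded to TMPSZ - 1 characters (see
 * seq_setwidth()/seq_pad() in tcp4_seq_show()) so records have a fixed width.
 */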
2252#define TMPSZ 150
2253
2254static int tcp4_seq_show(struct seq_file *seq, void *v)
2255{
5799de0b 2256 struct tcp_iter_state *st;
05dbc7b5 2257 struct sock *sk = v;
1da177e4 2258
652586df 2259 seq_setwidth(seq, TMPSZ - 1);
1da177e4 2260 if (v == SEQ_START_TOKEN) {
652586df 2261 seq_puts(seq, " sl local_address rem_address st tx_queue "
1da177e4
LT
2262 "rx_queue tr tm->when retrnsmt uid timeout "
2263 "inode");
2264 goto out;
2265 }
2266 st = seq->private;
2267
079096f1
ED
2268 if (sk->sk_state == TCP_TIME_WAIT)
2269 get_timewait4_sock(v, seq, st->num);
2270 else if (sk->sk_state == TCP_NEW_SYN_RECV)
aa3a0c8c 2271 get_openreq4(v, seq, st->num);
079096f1
ED
2272 else
2273 get_tcp4_sock(v, seq, st->num);
1da177e4 2274out:
652586df 2275 seq_pad(seq, '\n');
1da177e4
LT
2276 return 0;
2277}
2278
73cb88ec
AV
2279static const struct file_operations tcp_afinfo_seq_fops = {
2280 .owner = THIS_MODULE,
2281 .open = tcp_seq_open,
2282 .read = seq_read,
2283 .llseek = seq_lseek,
2284 .release = seq_release_net
2285};
2286
1da177e4 2287static struct tcp_seq_afinfo tcp4_seq_afinfo = {
1da177e4
LT
2288 .name = "tcp",
2289 .family = AF_INET,
73cb88ec 2290 .seq_fops = &tcp_afinfo_seq_fops,
9427c4b3
DL
2291 .seq_ops = {
2292 .show = tcp4_seq_show,
2293 },
1da177e4
LT
2294};
2295
2c8c1e72 2296static int __net_init tcp4_proc_init_net(struct net *net)
757764f6
PE
2297{
2298 return tcp_proc_register(net, &tcp4_seq_afinfo);
2299}
2300
2c8c1e72 2301static void __net_exit tcp4_proc_exit_net(struct net *net)
757764f6
PE
2302{
2303 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2304}
2305
2306static struct pernet_operations tcp4_net_ops = {
2307 .init = tcp4_proc_init_net,
2308 .exit = tcp4_proc_exit_net,
2309};
2310
1da177e4
LT
2311int __init tcp4_proc_init(void)
2312{
757764f6 2313 return register_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2314}
2315
2316void tcp4_proc_exit(void)
2317{
757764f6 2318 unregister_pernet_subsys(&tcp4_net_ops);
1da177e4
LT
2319}
2320#endif /* CONFIG_PROC_FS */
2321
2322struct proto tcp_prot = {
2323 .name = "TCP",
2324 .owner = THIS_MODULE,
2325 .close = tcp_close,
2326 .connect = tcp_v4_connect,
2327 .disconnect = tcp_disconnect,
463c84b9 2328 .accept = inet_csk_accept,
1da177e4
LT
2329 .ioctl = tcp_ioctl,
2330 .init = tcp_v4_init_sock,
2331 .destroy = tcp_v4_destroy_sock,
2332 .shutdown = tcp_shutdown,
2333 .setsockopt = tcp_setsockopt,
2334 .getsockopt = tcp_getsockopt,
1da177e4 2335 .recvmsg = tcp_recvmsg,
7ba42910
CG
2336 .sendmsg = tcp_sendmsg,
2337 .sendpage = tcp_sendpage,
1da177e4 2338 .backlog_rcv = tcp_v4_do_rcv,
46d3ceab 2339 .release_cb = tcp_release_cb,
ab1e0a13
ACM
2340 .hash = inet_hash,
2341 .unhash = inet_unhash,
2342 .get_port = inet_csk_get_port,
1da177e4 2343 .enter_memory_pressure = tcp_enter_memory_pressure,
c9bee3b7 2344 .stream_memory_free = tcp_stream_memory_free,
1da177e4 2345 .sockets_allocated = &tcp_sockets_allocated,
0a5578cf 2346 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2347 .memory_allocated = &tcp_memory_allocated,
2348 .memory_pressure = &tcp_memory_pressure,
a4fe34bf 2349 .sysctl_mem = sysctl_tcp_mem,
1da177e4
LT
2350 .sysctl_wmem = sysctl_tcp_wmem,
2351 .sysctl_rmem = sysctl_tcp_rmem,
2352 .max_header = MAX_TCP_HEADER,
2353 .obj_size = sizeof(struct tcp_sock),
3ab5aee7 2354 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2355 .twsk_prot = &tcp_timewait_sock_ops,
60236fdd 2356 .rsk_prot = &tcp_request_sock_ops,
39d8cda7 2357 .h.hashinfo = &tcp_hashinfo,
7ba42910 2358 .no_autobind = true,
543d9cfe
ACM
2359#ifdef CONFIG_COMPAT
2360 .compat_setsockopt = compat_tcp_setsockopt,
2361 .compat_getsockopt = compat_tcp_getsockopt,
d1a4c0b3 2362#endif
c1e64e29 2363 .diag_destroy = tcp_abort,
1da177e4 2364};
4bc2f18b 2365EXPORT_SYMBOL(tcp_prot);
1da177e4 2366
bdbbb852
ED
2367static void __net_exit tcp_sk_exit(struct net *net)
2368{
2369 int cpu;
2370
2371 for_each_possible_cpu(cpu)
2372 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2373 free_percpu(net->ipv4.tcp_sk);
2374}
2375
046ee902
DL
2376static int __net_init tcp_sk_init(struct net *net)
2377{
bdbbb852
ED
2378 int res, cpu;
2379
2380 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2381 if (!net->ipv4.tcp_sk)
2382 return -ENOMEM;
2383
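	/* One raw control socket per CPU: tcp_v4_send_reset() and
	 * tcp_v4_send_ack() use these to transmit replies that are not
	 * associated with a full socket.
	 */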
2384 for_each_possible_cpu(cpu) {
2385 struct sock *sk;
2386
2387 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2388 IPPROTO_TCP, net);
2389 if (res)
2390 goto fail;
a9d6532b 2391 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
bdbbb852
ED
2392 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2393 }
49213555 2394
5d134f1c 2395 net->ipv4.sysctl_tcp_ecn = 2;
49213555
DB
2396 net->ipv4.sysctl_tcp_ecn_fallback = 1;
2397
b0f9ca53 2398 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
6b58e0a5 2399 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
05cbc0db 2400 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
046ee902 2401
13b287e8 2402 net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
9bd6861b 2403 net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
b840d15d 2404 net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
13b287e8 2405
6fa25166 2406 net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
7c083ecb 2407 net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
0aca737d 2408 net->ipv4.sysctl_tcp_syncookies = 1;
1043e25f 2409 net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
ae5c3f40 2410 net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
c6214a97 2411 net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
c402d9be 2412 net->ipv4.sysctl_tcp_orphan_retries = 0;
1e579caa 2413 net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
4979f2d9 2414 net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
12ed8244 2415
49213555 2416 return 0;
bdbbb852
ED
2417fail:
2418 tcp_sk_exit(net);
2419
2420 return res;
b099ce26
EB
2421}
2422
2423static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2424{
2425 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
046ee902
DL
2426}
2427
2428static struct pernet_operations __net_initdata tcp_sk_ops = {
b099ce26
EB
2429 .init = tcp_sk_init,
2430 .exit = tcp_sk_exit,
2431 .exit_batch = tcp_sk_exit_batch,
046ee902
DL
2432};
2433
9b0f976f 2434void __init tcp_v4_init(void)
1da177e4 2435{
5caea4ea 2436 inet_hashinfo_init(&tcp_hashinfo);
6a1b3054 2437 if (register_pernet_subsys(&tcp_sk_ops))
1da177e4 2438 panic("Failed to create the TCP control socket.\n");
1da177e4 2439}