/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *	David S. Miller	:	New socket lookup architecture.
 *				This code is dedicated to John Dyson.
 *	David S. Miller :	Change semantics of established hash,
 *				half is devoted to TIME_WAIT sockets
 *				and the rest go in the other half.
 *	Andi Kleen :		Add support for syncookies and fixed
 *				some bugs: ip options weren't passed to
 *				the TCP layer, missed a check for an
 *				ACK bit.
 *	Andi Kleen :		Implemented fast path mtu discovery.
 *				Fixed many serious bugs in the
 *				request_sock handling and moved
 *				most of it into the af independent code.
 *				Added tail drop and some other bugfixes.
 *				Added new listen semantics.
 *	Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:	ip_dynaddr bits
 *	Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

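/* Worked example (illustrative, not from the original source): with
 * sysctl_tcp_tw_reuse set, a connect() that collides with a TIME_WAIT
 * bucket whose last timestamp is over a second old may reuse the port
 * pair.  The new ISN is placed 65535 + 2 bytes past tw_snd_nxt, i.e.
 * beyond the largest window the old peer could still accept, so stray
 * duplicates from the dead connection cannot land in the new sequence
 * space:
 *
 *	tw_snd_nxt == 1000  =>  write_seq == 1000 + 65537 == 66537
 */
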
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

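/* Usage sketch (hypothetical userspace caller): a blocking connect() on
 * an IPv4 stream socket reaches tcp_v4_connect() via inet_stream_connect():
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * The -EINVAL and -EAFNOSUPPORT returns above surface directly as the
 * caller's errno.
 */
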
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
static void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}

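/* Worked example (illustrative): if icsk_pmtu_cookie is 1500 and a
 * Fragmentation Needed ICMP reports an MTU of 1400, tcp_sync_mss()
 * shrinks the cached MSS to roughly 1400 minus the IP and TCP header
 * overhead (1360 with plain 20+20 byte headers) and
 * tcp_simple_retransmit() resends the over-sized segments immediately.
 */
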
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
		tcp_bound_rto(sk);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

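/* Example (illustrative): an ICMP_DEST_UNREACH/ICMP_PORT_UNREACH arriving
 * for a socket in TCP_SYN_SENT runs the TCP_SYN_SENT arm above, so a
 * pending connect() fails promptly with err ==
 * icmp_err_convert[ICMP_PORT_UNREACH].errno, i.e. ECONNREFUSED.
 */
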
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

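/* Reader note (sketch of the offload split, not extra kernel logic): for
 * CHECKSUM_PARTIAL the stack stores only the inverted pseudo-header sum
 * in th->check and tells the NIC where to fold in the rest via
 * csum_start/csum_offset; in the software branch the full checksum is
 * folded from csum_partial() over the TCP header and payload plus the
 * same pseudo-header.
 */
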
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not losing security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
		if (!tcp_rsk(req)->snt_synack && !err)
			tcp_rsk(req)->snt_synack = tcp_time_stamp;
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);

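/* Illustrative sysctl settings referenced above (values are examples):
 *
 *	# echo 1 > /proc/sys/net/ipv4/tcp_syncookies	- cookies on queue
 *							  overflow
 *	# echo 2 > /proc/sys/net/ipv4/tcp_syncookies	- unconditional
 *							  cookies; the warning
 *							  above is suppressed
 */
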
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}

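/* Usage sketch (hypothetical userspace peer of the parser above): keys
 * reach tcp_v4_parse_md5_keys() through setsockopt(TCP_MD5SIG):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &peer->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that peer, matching the
 * tcp_md5_do_del() branch above.
 */
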
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->ir_loc_addr;
		daddr = inet_rsk(req)->ir_rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

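/* Reader note (summary, assuming RFC 2385 semantics): the digest above
 * covers, in order, the IPv4 pseudo-header, the TCP header with the
 * checksum zeroed and the MD5 option itself excluded, the payload, and
 * finally the shared key - so either end can recompute it from the wire
 * segment plus the configured key alone.
 */
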
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}
#endif

*req
, struct sock
*sk
,
1231 struct sk_buff
*skb
)
1233 struct inet_request_sock
*ireq
= inet_rsk(req
);
1235 ireq
->ir_loc_addr
= ip_hdr(skb
)->daddr
;
1236 ireq
->ir_rmt_addr
= ip_hdr(skb
)->saddr
;
1237 ireq
->no_srccheck
= inet_sk(sk
)->transparent
;
1238 ireq
->opt
= tcp_v4_save_options(skb
);
static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
	.queue_hash_add =	inet_csk_reqsk_queue_hash_add,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);

/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->ir_rmt_addr;
	newinet->inet_rcv_saddr = ireq->ir_loc_addr;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

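/* Reader note (illustrative call sequence): a LISTEN socket receiving a
 * SYN goes through tcp_v4_conn_request() to queue a request_sock and
 * tcp_v4_send_synack() to reply; the final ACK of the handshake lands
 * in tcp_v4_hnd_req()/tcp_check_req(), which calls back into
 * tcp_v4_syn_recv_sock() above to mint the full socket that accept()
 * will return.
 */
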
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

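/* Reader note (illustrative dispatch map): tcp_v4_do_rcv() is the
 * per-socket demultiplexer - ESTABLISHED segments take the fast path
 * into tcp_rcv_established(), LISTEN segments may spawn a child via
 * tcp_v4_hnd_req(), and every other state funnels through
 * tcp_rcv_state_process().
 */
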
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	skb_dst_force(skb);
	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);

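/* Illustrative knob: writing 1 to /proc/sys/net/ipv4/tcp_low_latency
 * short-circuits this function, so segments are processed directly in
 * softirq context instead of being deferred to the reading task.
 */
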
/*
 *	From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

*sk
)
1811 struct tcp_sock
*tp
= tcp_sk(sk
);
1813 tcp_clear_xmit_timers(sk
);
1815 tcp_cleanup_congestion_control(sk
);
1817 /* Cleanup up the write buffer. */
1818 tcp_write_queue_purge(sk
);
1820 /* Cleans up our, hopefully empty, out_of_order_queue. */
1821 __skb_queue_purge(&tp
->out_of_order_queue
);
1823 #ifdef CONFIG_TCP_MD5SIG
1824 /* Clean up the MD5 key list, if any */
1825 if (tp
->md5sig_info
) {
1826 tcp_clear_md5_list(sk
);
1827 kfree_rcu(tp
->md5sig_info
, rcu
);
1828 tp
->md5sig_info
= NULL
;
1832 #ifdef CONFIG_NET_DMA
1833 /* Cleans up our sk_async_wait_queue */
1834 __skb_queue_purge(&sk
->sk_async_wait_queue
);
1837 /* Clean prequeue, it must be empty really */
1838 __skb_queue_purge(&tp
->ucopy
.prequeue
);
1840 /* Clean up a referenced TCP bind bucket. */
1841 if (inet_csk(sk
)->icsk_bind_hash
)
1844 BUG_ON(tp
->fastopen_rsk
!= NULL
);
1846 /* If socket is aborted during connect operation */
1847 tcp_free_fastopen_req(tp
);
1849 sk_sockets_allocated_dec(sk
);
1850 sock_release_memcg(sk
);
1852 EXPORT_SYMBOL(tcp_v4_destroy_sock
);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);

static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * because we dont lock socket, we might find a transient negative value
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	__be32 dest, src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait4_sock(v, seq, st->num);
		else
			get_tcp4_sock(v, seq, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
		break;
	}
out:
	seq_pad(seq, '\n');
	return 0;
}

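/* Example /proc/net/tcp line in the format printed above (illustrative
 * values - a listener on 127.0.0.1:22, state 0A == TCP_LISTEN):
 *
 *   0: 0100007F:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 12345 1 ...
 */
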
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v4_mtu_reduced,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);

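/* Reader note (illustrative wiring, outside this file): inet_init()
 * registers tcp_prot with proto_register() and maps it behind
 * socket(AF_INET, SOCK_STREAM, IPPROTO_TCP), which is how the callbacks
 * above are reached from the syscall layer.
 */
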
static int __net_init tcp_sk_init(struct net *net)
{
	net->ipv4.sysctl_tcp_ecn = 2;
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}