/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Changes:
 *	David S. Miller		:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *	David S. Miller		:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *	Andi Kleen		:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *	Andi Kleen		:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *	Mike McLagan		:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *	Andi Kleen		:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/inet_hashtables.h>
#include <net/transp_v6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
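
/* Both knobs are exposed under /proc/sys/net/ipv4/ (tcp_tw_reuse,
 * tcp_low_latency); e.g. "sysctl -w net.ipv4.tcp_tw_reuse=1" lets
 * tcp_twsk_unique() below recycle a TIME_WAIT port pair for outgoing
 * connections once the timestamp check passes.
 */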
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif
struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
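
/* Worked example of the check above: with sysctl_tcp_tw_reuse enabled and
 * twp == NULL, the TIME_WAIT bucket becomes reusable one second after
 * tw_ts_recent_stamp, and the new write_seq starts at
 * tw_snd_nxt + 65535 + 2 so it lands outside the old receiver's window
 * even for maximally sized segments.
 */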
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	inet_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
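
/* From user space this path is exercised by an ordinary connect(); a
 * minimal sketch (user-space C, not kernel code, values illustrative):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = { .sin_family = AF_INET,
 *				   .sin_port   = htons(80) };
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * The -EINVAL/-EAFNOSUPPORT checks above are what such a caller sees if
 * addr_len or sin_family is wrong.
 */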
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
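
/* Example of the err > 0 packing described above: an ICMP "destination
 * unreachable, port unreachable" message (type 3, code 3) arrives at
 * this layer as (ICMP_DEST_UNREACH << 8) | ICMP_PORT_UNREACH == 0x0303.
 */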
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			 iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs send out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even this two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */
	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
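
/* With CHECKSUM_PARTIAL above, only the pseudo-header sum is filled in and
 * the NIC finishes the job: e.g. for a 40-byte segment the stack stores
 * ~csum(saddr, daddr, IPPROTO_TCP, 40) in th->check and points
 * csum_start/csum_offset at the TCP checksum field for the hardware.
 */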
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *	So that we build reply only basing on parameters
 *	arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}
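
	/* Example: a bare SYN (no payload) with seq 1000 yields ack_seq
	 * 1001 here (seq + syn(1) + fin(0) + 0 payload bytes), i.e. the
	 * RST acknowledges exactly the sequence space the offending
	 * segment consumed.
	 */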
	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net,
					     &tcp_hashinfo, ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */
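
/* User space installs such a key with the TCP_MD5SIG socket option; a
 * minimal sketch (user-space C, key and peer address illustrative only):
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * which lands in tcp_v4_parse_md5_keys()/tcp_md5_do_add() below.
 */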
/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					__be32 daddr, __be32 saddr, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip4;

	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->ir_loc_addr;
		daddr = inet_rsk(req)->ir_rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
				      const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and its wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
}
*sk
, const struct sk_buff
*skb
)
1212 ret
= __tcp_v4_inbound_md5_hash(sk
, skb
);
static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	ireq->ir_loc_addr = ip_hdr(skb)->daddr;
	ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(skb);
}
static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
	.queue_hash_add =	inet_csk_reqsk_queue_hash_add,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs send to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->ir_rmt_addr;
	newinet->inet_rcv_saddr = ireq->ir_loc_addr;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	inet_set_txhash(newsk);
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(sk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 */
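
/* In practice the trade-off is steered by sysctl_tcp_low_latency: with
 * net.ipv4.tcp_low_latency=1 the prequeue below is bypassed entirely and
 * segments are processed immediately in softirq context instead of being
 * deferred to the copying task.
 */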
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (tp->ucopy.memory > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));

		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
			sk_backlog_rcv(sk, skb1);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPPREQUEUEDROPPED);
		}

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure compiler wont play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v4_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
*seq
, loff_t pos
)
2043 struct tcp_iter_state
*st
= seq
->private;
2045 st
->state
= TCP_SEQ_STATE_LISTENING
;
2046 rc
= listening_get_idx(seq
, &pos
);
2049 st
->state
= TCP_SEQ_STATE_ESTABLISHED
;
2050 rc
= established_get_idx(seq
, pos
);
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * because we dont lock socket, we might find a transient negative value
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	__be32 dest, src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait4_sock(v, seq, st->num);
		else
			get_tcp4_sock(v, seq, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
		break;
	}
out:
	seq_pad(seq, '\n');
	return 0;
}
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
= {
2352 .seq_fops
= &tcp_afinfo_seq_fops
,
2354 .show
= tcp4_seq_show
,
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
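
/* tcp_prot is wired into the AF_INET socket layer at boot: inet_init()
 * (net/ipv4/af_inet.c) calls proto_register(&tcp_prot, 1) and maps
 * SOCK_STREAM/IPPROTO_TCP onto it, which is how socket(AF_INET,
 * SOCK_STREAM, 0) ends up in the handlers above.
 */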
static int __net_init tcp_sk_init(struct net *net)
{
	net->ipv4.sysctl_tcp_ecn = 2;
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}