/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
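/* The initial sequence number computed below is derived from the connection
 * 4-tuple together with a boot-time secret and a clock component (see
 * secure_tcp_sequence_number()), so that ISNs are hard to predict off-path.
 */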
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
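/* Illustrative example (not part of the original file): with the
 * net.ipv4.tcp_tw_reuse sysctl enabled, e.g.
 *
 *	sysctl -w net.ipv4.tcp_tw_reuse=1
 *
 * an outgoing connect() may reuse a TIME-WAIT port pair once the bucket's
 * last timestamp is more than one second old; write_seq is restarted beyond
 * tw_snd_nxt (+ 65535 + 2) so new data cannot fall inside the old
 * connection's sequence window even without PAWS.
 */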
static int tcp_repair_connect(struct sock *sk)
{
	tcp_connect_init(sk);
	tcp_finish_connect(sk, NULL);

	return 0;
}
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	if (likely(!tp->repair))
		err = tcp_connect(sk);
	else
		err = tcp_repair_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
static void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes, so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
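/* Handle an ICMP redirect by delegating to the cached route's ->redirect()
 * operation, which updates the destination cache entry used by this socket.
 */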
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
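/* Worked example of the encoding above: a "destination unreachable,
 * fragmentation needed" ICMP message (type ICMP_DEST_UNREACH = 3,
 * code ICMP_FRAG_NEEDED = 4) is reported as err = (3 << 8) | 4 = 0x304.
 */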
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *req;
	__u32 seq;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	if (icmp_skb->len < (iph->ihl << 2) + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
			iph->saddr, th->source, inet_iif(icmp_skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk) &&
	    type != ICMP_DEST_UNREACH &&
	    code != ICMP_FRAG_NEEDED)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	req = tp->fastopen_rsk;
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt) &&
	    (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
		/* For a Fast Open socket, allow seq to be snt_isn. */
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff)
			break;

		/* XXX (TFO) - revisit the following logic for TFO */

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto - min(icsk->icsk_rto,
				tcp_time_stamp - TCP_SKB_CB(skb)->when);

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	/* XXX (TFO) - if it's a TFO socket and has been accepted, rather
	 * than following the TCP_SYN_RECV case and closing the socket,
	 * we ignore the ICMP error and keep trying like a fully established
	 * socket. Is this the right thing to do?
	 */
	if (req && req->sk == NULL)
		goto out;

	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet_csk_search_req(sk, &prev, th->dest,
					  iph->daddr, iph->saddr);
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		   an established socket here.
		 */
		WARN_ON(req->sk);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can f.e. if SYNs crossed,
			       or Fast Open.
			     */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}
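/* Note on the CHECKSUM_PARTIAL branch above: only the pseudo-header sum is
 * stored in th->check; the device (or skb_checksum_help() on the software
 * fallback path) completes the one's-complement sum over the TCP header and
 * payload using csum_start/csum_offset.
 */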
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *		So that we build reply only basing on parameters
 *		arrived with segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	if (skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not losing security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
					     &tcp_hashinfo, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			return;
		rcu_read_lock();
		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto release_sk1;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					     &ip_hdr(skb)->saddr,
					     AF_INET) : NULL;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. using iif for oif to
	 * make sure we can deliver it
	 */
	arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);

	net = dev_net(skb_dst(skb)->dev);
	arg.tos = ip_hdr(skb)->tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */
static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;
	struct net *net = dev_net(skb_dst(skb)->dev);

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (ts) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tcp_time_stamp);
		rep.opt[2] = htonl(ts);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (ts) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping,
			      bool nocache)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff * skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp, NULL);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
					    ireq->rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
		if (!tcp_rsk(req)->snt_synack && !err)
			tcp_rsk(req)->snt_synack = tcp_time_stamp;
	}

	return err;
}
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}
/*
 * Return true if a syncookie should be sent
 */
bool tcp_syn_flood_action(struct sock *sk,
			  const struct sk_buff *skb,
			  const char *proto)
{
	const char *msg = "Dropping request";
	bool want_cookie = false;
	struct listen_sock *lopt;

#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies) {
		msg = "Sending cookies";
		want_cookie = true;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
	} else
#endif
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

	lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
	if (!lopt->synflood_warned) {
		lopt->synflood_warned = 1;
		pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
			proto, ntohs(tcp_hdr(skb)->dest), msg);
	}
	return want_cookie;
}
EXPORT_SYMBOL(tcp_syn_flood_action);
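/* The syncookie fallback above is gated by the net.ipv4.tcp_syncookies
 * sysctl; for example "sysctl -w net.ipv4.tcp_syncookies=1" enables cookie
 * generation once the SYN queue overflows.
 */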
/*
 * Save and compile IPv4 options into the request_sock if needed.
 */
static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
						  struct sk_buff *skb)
{
	const struct ip_options *opt = &(IPCB(skb)->opt);
	struct ip_options_rcu *dopt = NULL;

	if (opt && opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt) {
			if (ip_options_echo(&dopt->opt, skb)) {
				kfree(dopt);
				dopt = NULL;
			}
		}
	}
	return dopt;
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */
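/* Userspace sketch (illustrative, not part of this file): a key for a peer
 * is installed with the TCP_MD5SIG socket option, which is handled by
 * tcp_v4_parse_md5_keys() below. "peer" stands for the peer's sockaddr:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */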
/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos;
	unsigned int size = sizeof(struct in_addr);
	struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       sock_owned_by_user(sk) ||
				       lockdep_is_held(&sk->sk_lock.slock));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	union tcp_md5_addr *addr;

	addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   sock_owned_by_user(sk));
	if (hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *pos, *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	if (!hlist_empty(&md5sig->head))
		tcp_free_md5sig_pool();
	hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_key || !cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
*hp
,
1119 __be32 daddr
, __be32 saddr
, int nbytes
)
1121 struct tcp4_pseudohdr
*bp
;
1122 struct scatterlist sg
;
1124 bp
= &hp
->md5_blk
.ip4
;
1127 * 1. the TCP pseudo-header (in the order: source IP address,
1128 * destination IP address, zero-padded protocol number, and
1134 bp
->protocol
= IPPROTO_TCP
;
1135 bp
->len
= cpu_to_be16(nbytes
);
1137 sg_init_one(&sg
, bp
, sizeof(*bp
));
1138 return crypto_hash_update(&hp
->md5_desc
, &sg
, sizeof(*bp
));
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) {
		saddr = inet_sk(sk)->inet_saddr;
		daddr = inet_sk(sk)->inet_daddr;
	} else if (req) {
		saddr = inet_rsk(req)->loc_addr;
		daddr = inet_rsk(req)->rmt_addr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "");
		return true;
	}
	return false;
}

#endif
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_v4_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
};
#endif
static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
			       struct request_sock *req,
			       struct tcp_fastopen_cookie *foc,
			       struct tcp_fastopen_cookie *valid_foc)
{
	bool skip_cookie = false;
	struct fastopen_queue *fastopenq;

	if (likely(!fastopen_cookie_present(foc))) {
		/* See include/net/tcp.h for the meaning of these knobs */
		if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
		    ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
			skip_cookie = true; /* no cookie to validate */
		else
			return false;
	}
	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
	/* A FO option is present; bump the counter. */
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validating the cookie in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
	    fastopenq == NULL || fastopenq->max_qlen == 0)
		return false;

	if (fastopenq->qlen >= fastopenq->max_qlen) {
		struct request_sock *req1;
		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
			spin_unlock(&fastopenq->lock);
			NET_INC_STATS_BH(sock_net(sk),
			    LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			/* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
			foc->len = -1;
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_free(req1);
	}
	if (skip_cookie) {
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	}
	if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
		if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
			tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
			if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
			    memcmp(&foc->val[0], &valid_foc->val[0],
			    TCP_FASTOPEN_COOKIE_SIZE) != 0)
				return false;
			valid_foc->len = -1;
		}
		/* Acknowledge the data received from the peer. */
		tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		return true;
	} else if (foc->len == 0) { /* Client requesting a cookie */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
		NET_INC_STATS_BH(sock_net(sk),
		    LINUX_MIB_TCPFASTOPENCOOKIEREQD);
	} else {
		/* Client sent a cookie with wrong size. Treat it
		 * the same as invalid and return a valid one.
		 */
		tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
	}
	return false;
}
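/* The Fast Open cookie validated above is generated by
 * tcp_fastopen_cookie_gen() from the client's source address and a
 * server-local secret, so a valid cookie cannot be forged off-path; a stale
 * cookie simply makes the server fall back to a normal three-way handshake
 * while returning a fresh cookie to the client.
 */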
static int tcp_v4_conn_req_fastopen(struct sock *sk,
				    struct sk_buff *skb,
				    struct sk_buff *skb_synack,
				    struct request_sock *req,
				    struct request_values *rvp)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct sock *child;
	int err;

	req->retrans = 0;
	req->sk = NULL;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL) {
		NET_INC_STATS_BH(sock_net(sk),
				 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		kfree_skb(skb_synack);
		return -1;
	}
	err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
				    ireq->rmt_addr, ireq->opt);
	err = net_xmit_eval(err);
	if (!err)
		tcp_rsk(req)->snt_synack = tcp_time_stamp;
	/* XXX (TFO) - is it ok to ignore error and continue? */

	spin_lock(&queue->fastopenq->lock);
	queue->fastopenq->qlen++;
	spin_unlock(&queue->fastopenq->lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	tp->fastopen_rsk = req;
	/* Do a hold on the listener sk so that if the listener is being
	 * closed, the child that has been accepted can live on and still
	 * access listen_lock.
	 */
	sock_hold(sk);
	tcp_rsk(req)->listener = sk;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the SYN table of the parent
	 * because it's been added to the accept queue directly.
	 */
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
	    TCP_TIMEOUT_INIT, TCP_RTO_MAX);

	/* Add the child socket directly into the accept queue */
	inet_csk_reqsk_queue_add(sk, req, child);

	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_buffer_space(child);
	tcp_init_metrics(child);

	/* Queue the data carried in the SYN packet. We need to first
	 * bump skb's refcnt because the caller will attempt to free it.
	 *
	 * XXX (TFO) - we honor a zero-payload TFO request for now.
	 * (Any reason not to?)
	 */
	if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
		/* Don't queue the skb if there is no payload in SYN.
		 * XXX (TFO) - How about SYN+FIN?
		 */
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	} else {
		skb = skb_get(skb);
		skb_dst_drop(skb);
		__skb_pull(skb, tcp_hdr(skb)->doff * 4);
		skb_set_owner_r(skb, child);
		__skb_queue_tail(&child->sk_receive_queue, skb);
		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	}
	sk->sk_data_ready(sk, 0);
	bh_unlock_sock(child);
	sock_put(child);
	WARN_ON(req->sk == NULL);
	return 0;
}
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet_request_sock *ireq;
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = NULL;
	__be32 saddr = ip_hdr(skb)->saddr;
	__be32 daddr = ip_hdr(skb)->daddr;
	__u32 isn = TCP_SKB_CB(skb)->when;
	bool want_cookie = false;
	struct flowi4 fl4;
	struct tcp_fastopen_cookie foc = { .len = -1 };
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sk_buff *skb_synack;
	int do_fastopen;

	/* Never answer to SYNs sent to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	/* TW buckets are converted to open requests without
	 * limitations, they conserve resources and peer is
	 * evidently real one.
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
		if (!want_cookie)
			goto drop;
	}

	/* Accept backlog is full. If we have already queued enough
	 * of warm entries in syn queue, drop request. It is better than
	 * clogging syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&tcp_request_sock_ops);
	if (!req)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tmp_opt.user_mss  = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
	    want_cookie ? NULL : &foc);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_release;

		/* Secret recipe starts with IP addresses */
		*mess++ ^= (__force u32)daddr;
		*mess++ ^= (__force u32)saddr;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_release;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	ireq = inet_rsk(req);
	ireq->loc_addr = daddr;
	ireq->rmt_addr = saddr;
	ireq->no_srccheck = inet_sk(sk)->transparent;
	ireq->opt = tcp_v4_save_options(sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	if (want_cookie) {
		isn = cookie_v4_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
		    fl4.daddr == saddr) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
				       &saddr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v4_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	if (dst == NULL) {
		dst = inet_csk_route_req(sk, &fl4, req);
		if (dst == NULL)
			goto drop_and_free;
	}
	do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);

	/* We don't call tcp_v4_send_synack() directly because we need
	 * to make sure a child socket can be created successfully before
	 * sending back synack!
	 *
	 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
	 * (or better yet, call tcp_send_synack() in the child context
	 * directly, but will have to fix bunch of other code first)
	 * after syn_recv_sock() except one will need to first fix the
	 * latter to remove its dependency on the current implementation
	 * of tcp_v4_send_synack()->tcp_select_initial_window().
	 */
	skb_synack = tcp_make_synack(sk, dst, req,
	    (struct request_values *)&tmp_ext,
	    fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);

	if (skb_synack) {
		__tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
		skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
	} else
		goto drop_and_free;

	if (likely(!do_fastopen)) {
		int err;
		err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
		     ireq->rmt_addr, ireq->opt);
		err = net_xmit_eval(err);
		if (err || want_cookie)
			goto drop_and_free;

		tcp_rsk(req)->snt_synack = tcp_time_stamp;
		tcp_rsk(req)->listener = NULL;
		/* Add the request_sock to the SYN table */
		inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		if (fastopen_cookie_present(&foc) && foc.len != 0)
			NET_INC_STATS_BH(sock_net(sk),
			    LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
	} else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
	    (struct request_values *)&tmp_ext))
		goto drop_and_free;

	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	newinet->inet_daddr   = ireq->rmt_addr;
	newinet->inet_rcv_saddr = ireq->loc_addr;
	newinet->inet_saddr	      = ireq->loc_addr;
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	tcp_synack_rtt_meas(newsk, req);
	newtp->total_retrans = req->retrans;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key != NULL) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	__inet_hash_nolisten(newsk, NULL);

	return newsk;

exit_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
put_and_exit:
	tcp_clear_xmit_timers(newsk);
	tcp_cleanup_congestion_control(newsk);
	bh_unlock_sock(newsk);
	sock_put(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
						       iph->saddr, iph->daddr);
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v4_check(skb->len, iph->saddr,
				  iph->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
				       skb->len, IPPROTO_TCP, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and this is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, 0) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
			rsk = sk;
			goto reset;
		}
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(net, &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
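/* Main receive entry point (softirq context). Depending on whether the
 * socket is owned by a user thread, the segment is processed immediately
 * via tcp_v4_do_rcv(), parked on the prequeue for the process-context
 * reader, or appended to the socket backlog to be drained on release.
 */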
int tcp_v4_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided case of th->doff==0 is eliminated.
	 * So, we defer the checks. */
	if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	iph = ip_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when	 = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;

	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v4_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v4_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
			inet_twsk_put(inet_twsk(sk));
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	__skb_queue_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

#ifdef CONFIG_NET_DMA
	/* Cleans up our sk_async_wait_queue */
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	/*
	 * If sendmsg cached page exists, toss it.
	 */
	if (sk->sk_sndmsg_page) {
		__free_page(sk->sk_sndmsg_page);
		sk->sk_sndmsg_page = NULL;
	}

	/* TCP Cookie Transactions */
	if (tp->cookie_values != NULL) {
		kref_put(&tp->cookie_values->kref,
			 tcp_cookie_values_release);
		tp->cookie_values = NULL;
	}
	BUG_ON(tp->fastopen_rsk != NULL);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);

	sk_sockets_allocated_dec(sk);
	sock_release_memcg(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL :
		list_entry(head->first, struct inet_timewait_sock, tw_node);
}

static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
	return !is_a_nulls(tw->tw_node.next) ?
		hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
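
/*
 * Hedged sketch of why the nulls helpers matter: nulls chains terminate in
 * an encoded marker rather than plain NULL, so a lockless reader can detect
 * that an entry moved to another chain mid-walk and restart (this is how
 * __inet_lookup_established() validates its walk).
 */
#if 0
static bool example_walk_ended_in_bucket(struct hlist_nulls_head *head,
					 unsigned long expected)
{
	struct hlist_nulls_node *node;
	struct sock *sk;

	sk_nulls_for_each(sk, node, head) {
		/* inspect sk here */
	}
	/* the terminating marker identifies the chain we finished on */
	return get_nulls_value(node) == expected;
}
#endif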
/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct inet_connection_sock *icsk;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;
	struct inet_listen_hashbucket *ilb;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	if (!sk) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_OPENREQ) {
		struct request_sock *req = cur;

		icsk = inet_csk(st->syn_wait_sk);
		req = req->dl_next;
		while (1) {
			while (req) {
				if (req->rsk_ops->family == st->family) {
					cur = req;
					goto out;
				}
				req = req->dl_next;
			}
			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
				break;
get_req:
			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
		}
		sk	  = sk_nulls_next(st->syn_wait_sk);
		st->state = TCP_SEQ_STATE_LISTENING;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	} else {
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue))
			goto start_req;
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		sk = sk_nulls_next(sk);
	}
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family) {
			cur = sk;
			goto out;
		}
		icsk = inet_csk(sk);
		read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
start_req:
			st->uid		= sock_i_uid(sk);
			st->syn_wait_sk = sk;
			st->state	= TCP_SEQ_STATE_OPENREQ;
			st->sbucket	= 0;
			goto get_req;
		}
		read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE) {
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_nulls_head(&ilb->head);
		goto get_sk;
	}
	cur = NULL;
out:
	return cur;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline bool empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}
/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = cur;
		tw = tw_next(tw);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
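
/*
 * Hedged sketch of the contract behind start/next/stop: the seq_file core
 * (fs/seq_file.c) drives the three hooks installed by tcp_proc_register()
 * roughly as below for each read(), which is why tcp_seq_stop() above must
 * drop whatever lock ->start()/->next() left held.  Simplified; the real
 * loop also handles buffer overflow and restart.
 */
#if 0
static void example_seq_walk(struct seq_file *seq,
			     const struct seq_operations *ops, loff_t pos)
{
	void *v = ops->start(seq, &pos);

	while (v && !IS_ERR(v)) {
		if (ops->show(seq, v) < 0)
			break;
		v = ops->next(seq, v, &pos);
	}
	ops->stop(seq, v);
}
#endif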
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->last_pos	= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start	= tcp_seq_start;
	afinfo->seq_ops.next	= tcp_seq_next;
	afinfo->seq_ops.stop	= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);
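
/*
 * Hedged usage sketch: a caller supplies name, family, fops and a show
 * routine, exactly as tcp4_seq_afinfo does below (tcp_ipv6.c does the same
 * for AF_INET6).  "tcp_example" is a made-up proc entry name for
 * illustration only.
 */
#if 0
static struct tcp_seq_afinfo example_seq_afinfo = {
	.name		= "tcp_example",	/* hypothetical */
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show	= tcp4_seq_show,
	},
};

static int __net_init example_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &example_seq_afinfo);
}
#endif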
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->retrans,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/*
		 * because we dont lock socket, we might find a transient negative value
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
		len);
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	long delta = tw->tw_ttd - jiffies;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
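
/*
 * Hedged userspace sketch (not kernel code, not built here): the address
 * fields printed above are raw __be32 values rendered with %08X and the
 * ports are host-order hex, so on a little-endian machine "0100007F:0016"
 * decodes to 127.0.0.1:22.
 */
#if 0
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned int addr, port;

	if (sscanf("0100007F:0016", "%8X:%4X", &addr, &port) == 2) {
		struct in_addr in = { .s_addr = addr };

		printf("%s:%u\n", inet_ntoa(in), port);	/* 127.0.0.1:22 */
	}
	return 0;
}
#endif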
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v4_mtu_reduced,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
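
/*
 * Hedged sketch: tcp_prot is not registered in this file; af_inet.c plugs
 * it into the socket layer at boot, approximately like this:
 */
#if 0
static int __init example_register_tcp(void)
{
	return proto_register(&tcp_prot, 1);	/* 1 = allocate slab caches */
}
#endif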
static int __net_init tcp_sk_init(struct net *net)
{
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}