/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

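/* Cache the validated inbound route and its cookie on the socket so that
 * later segments on this flow can skip a full routing lookup; the cached
 * dst is checked again in tcp_v6_do_rcv() and tcp_v6_early_demux() below.
 */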
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

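/* Active open: validate the destination address, hand v4-mapped targets off
 * to tcp_v4_connect(), route the flow, pick a source address, choose an
 * initial sequence number and finally send the SYN via tcp_connect().
 */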
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;

			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

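/* Called when an ICMPV6_PKT_TOOBIG for this socket has been processed:
 * re-learn the path MTU and, if our cached MSS no longer fits, shrink it
 * and retransmit whatever is now too large to pass.
 */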
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

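/* ICMPv6 error handler: locate the owning socket, then deal with the
 * special cases (TIME_WAIT, request sockets, redirects, PMTU discovery,
 * Fast Open) before reporting the error to the user.
 */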
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);
	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be the ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

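/* Feed the RFC 2460 pseudo-header (saddr, daddr, upper-layer length and
 * next header = TCP) into the MD5 digest; both signing and verification
 * start from this block.
 */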
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);

		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

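/* Returns true when the segment must be dropped: a key is configured but no
 * MD5 option arrived, an unexpected option arrived, or the signature does
 * not match.
 */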
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU -
				sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};

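/* Common backbone for stateless replies (RSTs and ACKs): build a minimal
 * TCP header, append timestamp and MD5 options as needed, and transmit via
 * the per-namespace control socket.
 */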
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for an RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key; no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}

static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}

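/* Create the child socket for a completed handshake. The ETH_P_IP branch
 * handles v4-mapped connections by delegating to tcp_v4_syn_recv_sock() and
 * then re-pointing the child at the mapped operations.
 */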
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);
		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				skb_set_owner_r(newnp->pktoptions, newsk);
		}
	}

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which the user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure the compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move the header back to the beginning if
	 * xfrm6_policy_check() and tcp_v6_fill_cb() are going to be
	 * called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

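/* Main receive entry point, registered as the inet6 protocol handler for
 * IPPROTO_TCP: validate the header, look up the socket and dispatch to the
 * established, request-socket or TIME_WAIT paths.
 */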
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);

			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

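/* Early demux: a best-effort established-socket lookup done before routing,
 * so a cached rx_dst can be attached to the skb and the route lookup skipped.
 */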
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

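/* AF-specific connection ops for native IPv6 sockets; ipv6_mapped below
 * swaps in the IPv4 transmit and header-rebuild paths for v4-mapped sockets
 * while keeping the IPv6 socket option handlers.
 */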
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

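/* Glue between the generic TCP code and this file: proto ops for the
 * SOCK_STREAM/IPPROTO_TCP protosw entry registered in tcpv6_init() below.
 */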
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.clear_sk		= tcp_v6_clear_sk,
	.diag_destroy		= tcp_abort,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}