/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
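
/*
 * Note on the initial sequence number above: secure_tcpv6_sequence_number()
 * mixes the address pair and port pair with a per-boot secret key and adds
 * a coarse clock component, so two otherwise identical connections get
 * unrelated ISNs.  A rough sketch of the idea (hypothetical helper names,
 * not the real implementation, which lives in net/core/secure_seq.c):
 *
 *	isn = keyed_hash(secret, daddr, saddr, dport, sport)
 *	      + (current_time_ns >> 6);
 */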
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;

			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
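
/*
 * Worth spelling out: an AF_INET6 socket can connect to an IPv4 peer by
 * using a v4-mapped address such as ::ffff:192.0.2.1.  In that branch above
 * the last 32 bits of sin6_addr become sin_addr, icsk_af_ops flips to
 * ipv6_mapped, and the rest of the connection is handled by tcp_v4_connect()
 * and the IPv4 code, unless IPV6_V6ONLY was set (then -ENETUNREACH).
 * The default mss_clamp chosen above is the most conservative IPv6 value:
 * 1280 (IPV6_MIN_MTU) - 20 (tcphdr) - 40 (ipv6hdr) = 1220 bytes.
 */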
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);
	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
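
/*
 * Summary of the error handler above: ICMPv6 errors are demuxed back to the
 * owning socket via the (daddr, dport, saddr, sport) tuple taken from the
 * TCP header echoed inside the ICMP payload.  Packet-too-big updates the
 * path MTU (deferred when the socket is owned by user context), NDISC
 * redirects refresh the cached route, and hard errors are reported through
 * sk_err or sk_err_soft depending on whether the socket is locked.
 */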
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
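
/*
 * Userspace view of the parser above (a minimal sketch, error handling
 * omitted): keys are installed per peer address with the TCP_MD5SIG socket
 * option before connect()/listen().  A v4-mapped peer address installs an
 * AF_INET key, anything else an AF_INET6 key.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *peer = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	peer->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &peer->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */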
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);

		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
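
/*
 * The "pseudo-header" hashed first above is the RFC 2460 checksum layout
 * reused as MD5 input.  struct tcp6_pseudohdr (include/net/tcp.h) is:
 *
 *	struct tcp6_pseudohdr {
 *		struct in6_addr	saddr;
 *		struct in6_addr	daddr;
 *		__be32		len;		(TCP segment length)
 *		__be32		protocol;	(IPPROTO_TCP, zero-padded)
 *	};
 *
 * so the signature covers the address pair, segment length and protocol,
 * then the TCP header, the payload, and finally the key itself.
 */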
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};
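
/*
 * These two tables are what tcp_conn_request() consults for the IPv6 case:
 * tcp6_request_sock_ops sizes and destroys the request mini-socket and knows
 * how to ACK or reset it, while tcp_request_sock_ipv6_ops supplies the
 * address-family-specific steps (routing the SYN-ACK, picking the ISN,
 * SYN-cookie encoding, optional MD5 signing).  The IPv4 code has the same
 * split with tcp_request_sock_ipv4_ops in tcp_ipv4.c.
 */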
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even when it is for a RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
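
/*
 * tcp_v6_send_response() is the common backend for both stateless replies
 * this file can emit: a RST (rst=1, used by tcp_v6_send_reset) and a bare
 * ACK (rst=0, used for timewait and request-socket ACKs).  It builds a
 * minimal TCP header plus optional timestamp and MD5 options, swaps the
 * incoming address/port pair into the flow, and transmits on the per-netns
 * control socket (net->ipv6.tcp_sk) rather than the original connection's
 * socket, since that one may no longer exist.
 */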
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not losing security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);
		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				skb_set_owner_r(newnp->pktoptions, newsk);
		}
	}

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
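
/*
 * Note the first branch above: a SYN that arrived as plain IPv4 on a
 * dual-stack listener is handed to tcp_v4_syn_recv_sock(), and the
 * resulting child is then retrofitted with the ipv6_mapped ops so that
 * later setsockopt()/getsockopt() calls through the IPv6 API keep working
 * while the data path stays pure IPv4.
 */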
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
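
/*
 * end_seq above is the sequence number of the first byte after this
 * segment: SYN and FIN each consume one unit of sequence space, so a SYN
 * carrying no data gives end_seq = seq + 1, while a pure data segment with
 * 100 payload bytes gives end_seq = seq + 100.
 */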
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);

			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
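
/*
 * tcp_v6_rcv() is the protocol handler registered below in tcpv6_protocol.
 * The flow is: validate and checksum the header, look the segment up in the
 * established/listener hash tables, then dispatch on socket state --
 * NEW_SYN_RECV request sockets go through tcp_check_req(), TIME_WAIT sockets
 * through tcp_timewait_state_process(), and everything else lands in
 * tcp_v6_do_rcv() either directly, via the prequeue, or via the backlog when
 * the socket is currently owned by a user-context thread.
 */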
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.clear_sk		= tcp_v6_clear_sk,
	.diag_destroy		= tcp_abort,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}