/*
 *	Linux INET6 implementation
 *
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void	__tcp_v6_send_check(struct sk_buff *skb,
				    const struct in6_addr *saddr,
				    const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct rt6_info *rt = (const struct rt6_info *)dst;

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	if (rt->rt6i_node)
		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
}
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
static __inline__ __sum16 tcp_v6_check(int len,
				       const struct in6_addr *saddr,
				       const struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
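
/* Note: csum_ipv6_magic() folds the RFC 2460 pseudo-header (source and
 * destination address, upper-layer length, next header = IPPROTO_TCP)
 * into the running sum, so calling tcp_v6_check(len, saddr, daddr, 0)
 * yields just the pseudo-header checksum. __tcp_v6_send_check() below
 * relies on this to seed checksum offload.
 */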
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;
	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}
	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
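
/* Illustrative sketch (hypothetical userspace caller, not part of this
 * file): the IPV6_ADDR_MAPPED branch above is what a dual-stack client
 * exercises when it connects an AF_INET6 socket to a v4-mapped address:
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
 *				   .sin6_port   = htons(80) };
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sa.sin6_addr);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * Unless the socket is IPV6_V6ONLY, the kernel hands the connection to
 * tcp_v4_connect() and swaps in the ipv6_mapped ops, as seen above.
 */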
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
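
/* Note: this runs directly from tcp_v6_err() when an ICMPV6_PKT_TOOBIG
 * arrives and the socket is not owned by user context; otherwise the
 * TCP_MTU_REDUCED_DEFERRED bit defers it until the socket lock is
 * released. Shrinking the MSS and retransmitting implements path MTU
 * discovery (RFC 1981) for TCP over IPv6.
 */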
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi6 *fl6,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, rvp, NULL);

	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		fl6->daddr = treq->rmt_addr;
		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	struct flowi6 fl6;

	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
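
/* Illustrative sketch (hypothetical userspace caller, not part of this
 * file): the parser above backs the TCP_MD5SIG socket option
 * (RFC 2385). Installing a key for an IPv6 peer might look like:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *	sin6->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &sin6->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key, and v4-mapped peer addresses are
 * stored under AF_INET, matching the branches above.
 */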
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
#endif
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif
static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}
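
/* Note: the CHECKSUM_PARTIAL branch stores only the (inverted)
 * pseudo-header sum plus csum_start/csum_offset so a capable NIC can
 * finish the checksum; the fallback branch completes it in software
 * with csum_partial() over the TCP header and payload.
 */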
static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);
	__wsum wsum;
	__sum16 sum;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
flush:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;

	case CHECKSUM_NONE:
		wsum = ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
						    skb_gro_len(skb),
						    IPPROTO_TCP, 0));
		sum = csum_fold(skb_checksum(skb,
					     skb_gro_offset(skb),
					     skb_gro_len(skb),
					     wsum));
		if (sum)
			goto flush;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}
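
/* Note: tcp6_gro_receive() only validates the checksum before handing
 * segments to the shared tcp_gro_receive() aggregator;
 * tcp6_gro_complete() then restores the pseudo-header checksum and
 * marks the merged skb SKB_GSO_TCPV6 so it can be resegmented later.
 */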
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for RST.
	 * The underlying function will use this to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
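
/* Note: replies are built on the per-namespace control socket
 * (net->ipv6.tcp_sk) rather than a flow's own socket, so a RST can be
 * generated even when the incoming segment matched no socket at all;
 * the addresses and ports are deliberately swapped from @skb.
 */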
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not losing security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->daddr,
					    ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	bool want_cookie = false;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	treq->rmt_addr = ipv6_hdr(skb)->saddr;
	treq->loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_release;

	if (tcp_v6_send_synack(sk, dst, &fl6, req,
			       (struct request_values *)&tmp_ext,
			       skb_get_queue_mapping(skb)) ||
	    want_cookie)
		goto drop_and_free;

	tcp_rsk(req)->snt_synack = tcp_time_stamp;
	tcp_rsk(req)->listener = NULL;
	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}
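
/* Note: when the SYN queue is full, tcp_syn_flood_action() decides
 * (based on the tcp_syncookies sysctl) whether to answer statelessly.
 * With want_cookie set, cookie_v6_init_sequence() encodes the
 * connection parameters in the ISN, the request is never queued, and
 * cookie_v6_check() reconstructs it when the final ACK comes back.
 */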
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_tclass  = ipv6_tclass(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newnp->daddr = treq->rmt_addr;
	newnp->saddr = treq->loc_addr;
	newnp->rcv_saddr = treq->loc_addr;
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	tcp_synack_rtt_meas(newsk, req);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
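
/* Note: for skbs the driver did not verify, only the pseudo-header sum
 * is stored here and full verification is deferred until the data is
 * copied; packets of at most 76 bytes are checksummed immediately,
 * since deferral buys nothing for segments that are mostly header.
 */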
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxtclass)
			np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;
			struct inet_sock *icsk = inet_sk(sk);
			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    icsk->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
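
/* Note: early demux runs from the IPv6 receive path before routing.
 * A hit on an established socket lets the stack reuse the socket's
 * cached rx dst (validated against rx_dst_cookie and the ingress
 * ifindex) instead of doing a full route lookup per segment.
 */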
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	long delta = tw->tw_ttd - jiffies;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v6_mtu_reduced,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
	goto out_tcpv6_protocol;
}
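
/* Note: registration order matters here -- the protocol handler is in
 * place before inet6_register_protosw() exposes SOCK_STREAM/IPPROTO_TCP
 * sockets, and the goto chain above unwinds in reverse on failure.
 */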
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}