/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/addrconf.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
72 /* Socket used for sending RSTs and ACKs */
73 static struct socket
*tcp6_socket
;
75 static void tcp_v6_send_reset(struct sock
*sk
, struct sk_buff
*skb
);
76 static void tcp_v6_reqsk_send_ack(struct sk_buff
*skb
, struct request_sock
*req
);
77 static void tcp_v6_send_check(struct sock
*sk
, int len
,
80 static int tcp_v6_do_rcv(struct sock
*sk
, struct sk_buff
*skb
);
82 static struct inet_connection_sock_af_ops ipv6_mapped
;
83 static struct inet_connection_sock_af_ops ipv6_specific
;
84 static struct tcp_sock_af_ops tcp_sock_ipv6_specific
;
85 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific
;
87 static int tcp_v6_get_port(struct sock
*sk
, unsigned short snum
)
89 return inet_csk_get_port(&tcp_hashinfo
, sk
, snum
,
90 inet6_csk_bind_conflict
);
93 static void tcp_v6_hash(struct sock
*sk
)
95 if (sk
->sk_state
!= TCP_CLOSE
) {
96 if (inet_csk(sk
)->icsk_af_ops
== &ipv6_mapped
) {
101 __inet6_hash(&tcp_hashinfo
, sk
);
106 static __inline__ u16
tcp_v6_check(struct tcphdr
*th
, int len
,
107 struct in6_addr
*saddr
,
108 struct in6_addr
*daddr
,
111 return csum_ipv6_magic(saddr
, daddr
, len
, IPPROTO_TCP
, base
);
114 static __u32
tcp_v6_init_sequence(struct sk_buff
*skb
)
116 return secure_tcpv6_sequence_number(skb
->nh
.ipv6h
->daddr
.s6_addr32
,
117 skb
->nh
.ipv6h
->saddr
.s6_addr32
,
122 static int tcp_v6_connect(struct sock
*sk
, struct sockaddr
*uaddr
,
125 struct sockaddr_in6
*usin
= (struct sockaddr_in6
*) uaddr
;
126 struct inet_sock
*inet
= inet_sk(sk
);
127 struct inet_connection_sock
*icsk
= inet_csk(sk
);
128 struct ipv6_pinfo
*np
= inet6_sk(sk
);
129 struct tcp_sock
*tp
= tcp_sk(sk
);
130 struct in6_addr
*saddr
= NULL
, *final_p
= NULL
, final
;
132 struct dst_entry
*dst
;
136 if (addr_len
< SIN6_LEN_RFC2133
)
139 if (usin
->sin6_family
!= AF_INET6
)
140 return(-EAFNOSUPPORT
);
142 memset(&fl
, 0, sizeof(fl
));
145 fl
.fl6_flowlabel
= usin
->sin6_flowinfo
&IPV6_FLOWINFO_MASK
;
146 IP6_ECN_flow_init(fl
.fl6_flowlabel
);
147 if (fl
.fl6_flowlabel
&IPV6_FLOWLABEL_MASK
) {
148 struct ip6_flowlabel
*flowlabel
;
149 flowlabel
= fl6_sock_lookup(sk
, fl
.fl6_flowlabel
);
150 if (flowlabel
== NULL
)
152 ipv6_addr_copy(&usin
->sin6_addr
, &flowlabel
->dst
);
153 fl6_sock_release(flowlabel
);
158 * connect() to INADDR_ANY means loopback (BSD'ism).
161 if(ipv6_addr_any(&usin
->sin6_addr
))
162 usin
->sin6_addr
.s6_addr
[15] = 0x1;
164 addr_type
= ipv6_addr_type(&usin
->sin6_addr
);
166 if(addr_type
& IPV6_ADDR_MULTICAST
)
169 if (addr_type
&IPV6_ADDR_LINKLOCAL
) {
170 if (addr_len
>= sizeof(struct sockaddr_in6
) &&
171 usin
->sin6_scope_id
) {
172 /* If interface is set while binding, indices
175 if (sk
->sk_bound_dev_if
&&
176 sk
->sk_bound_dev_if
!= usin
->sin6_scope_id
)
179 sk
->sk_bound_dev_if
= usin
->sin6_scope_id
;
182 /* Connect to link-local address requires an interface */
183 if (!sk
->sk_bound_dev_if
)
187 if (tp
->rx_opt
.ts_recent_stamp
&&
188 !ipv6_addr_equal(&np
->daddr
, &usin
->sin6_addr
)) {
189 tp
->rx_opt
.ts_recent
= 0;
190 tp
->rx_opt
.ts_recent_stamp
= 0;
194 ipv6_addr_copy(&np
->daddr
, &usin
->sin6_addr
);
195 np
->flow_label
= fl
.fl6_flowlabel
;
201 if (addr_type
== IPV6_ADDR_MAPPED
) {
202 u32 exthdrlen
= icsk
->icsk_ext_hdr_len
;
203 struct sockaddr_in sin
;
205 SOCK_DEBUG(sk
, "connect: ipv4 mapped\n");
207 if (__ipv6_only_sock(sk
))
210 sin
.sin_family
= AF_INET
;
211 sin
.sin_port
= usin
->sin6_port
;
212 sin
.sin_addr
.s_addr
= usin
->sin6_addr
.s6_addr32
[3];
214 icsk
->icsk_af_ops
= &ipv6_mapped
;
215 sk
->sk_backlog_rcv
= tcp_v4_do_rcv
;
216 #ifdef CONFIG_TCP_MD5SIG
217 tp
->af_specific
= &tcp_sock_ipv6_mapped_specific
;
220 err
= tcp_v4_connect(sk
, (struct sockaddr
*)&sin
, sizeof(sin
));
223 icsk
->icsk_ext_hdr_len
= exthdrlen
;
224 icsk
->icsk_af_ops
= &ipv6_specific
;
225 sk
->sk_backlog_rcv
= tcp_v6_do_rcv
;
226 #ifdef CONFIG_TCP_MD5SIG
227 tp
->af_specific
= &tcp_sock_ipv6_specific
;
231 ipv6_addr_set(&np
->saddr
, 0, 0, htonl(0x0000FFFF),
233 ipv6_addr_set(&np
->rcv_saddr
, 0, 0, htonl(0x0000FFFF),
240 if (!ipv6_addr_any(&np
->rcv_saddr
))
241 saddr
= &np
->rcv_saddr
;
243 fl
.proto
= IPPROTO_TCP
;
244 ipv6_addr_copy(&fl
.fl6_dst
, &np
->daddr
);
245 ipv6_addr_copy(&fl
.fl6_src
,
246 (saddr
? saddr
: &np
->saddr
));
247 fl
.oif
= sk
->sk_bound_dev_if
;
248 fl
.fl_ip_dport
= usin
->sin6_port
;
249 fl
.fl_ip_sport
= inet
->sport
;
251 if (np
->opt
&& np
->opt
->srcrt
) {
252 struct rt0_hdr
*rt0
= (struct rt0_hdr
*)np
->opt
->srcrt
;
253 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
254 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
258 security_sk_classify_flow(sk
, &fl
);
260 err
= ip6_dst_lookup(sk
, &dst
, &fl
);
264 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
266 if ((err
= xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0)
271 ipv6_addr_copy(&np
->rcv_saddr
, saddr
);
274 /* set the source address */
275 ipv6_addr_copy(&np
->saddr
, saddr
);
276 inet
->rcv_saddr
= LOOPBACK4_IPV6
;
278 sk
->sk_gso_type
= SKB_GSO_TCPV6
;
279 __ip6_dst_store(sk
, dst
, NULL
, NULL
);
281 icsk
->icsk_ext_hdr_len
= 0;
283 icsk
->icsk_ext_hdr_len
= (np
->opt
->opt_flen
+
286 tp
->rx_opt
.mss_clamp
= IPV6_MIN_MTU
- sizeof(struct tcphdr
) - sizeof(struct ipv6hdr
);
288 inet
->dport
= usin
->sin6_port
;
290 tcp_set_state(sk
, TCP_SYN_SENT
);
291 err
= inet6_hash_connect(&tcp_death_row
, sk
);
296 tp
->write_seq
= secure_tcpv6_sequence_number(np
->saddr
.s6_addr32
,
301 err
= tcp_connect(sk
);
308 tcp_set_state(sk
, TCP_CLOSE
);
312 sk
->sk_route_caps
= 0;
316 static void tcp_v6_err(struct sk_buff
*skb
, struct inet6_skb_parm
*opt
,
317 int type
, int code
, int offset
, __be32 info
)
319 struct ipv6hdr
*hdr
= (struct ipv6hdr
*)skb
->data
;
320 const struct tcphdr
*th
= (struct tcphdr
*)(skb
->data
+offset
);
321 struct ipv6_pinfo
*np
;
327 sk
= inet6_lookup(&tcp_hashinfo
, &hdr
->daddr
, th
->dest
, &hdr
->saddr
,
328 th
->source
, skb
->dev
->ifindex
);
331 ICMP6_INC_STATS_BH(__in6_dev_get(skb
->dev
), ICMP6_MIB_INERRORS
);
335 if (sk
->sk_state
== TCP_TIME_WAIT
) {
336 inet_twsk_put(inet_twsk(sk
));
341 if (sock_owned_by_user(sk
))
342 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS
);
344 if (sk
->sk_state
== TCP_CLOSE
)
348 seq
= ntohl(th
->seq
);
349 if (sk
->sk_state
!= TCP_LISTEN
&&
350 !between(seq
, tp
->snd_una
, tp
->snd_nxt
)) {
351 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS
);
357 if (type
== ICMPV6_PKT_TOOBIG
) {
358 struct dst_entry
*dst
= NULL
;
360 if (sock_owned_by_user(sk
))
362 if ((1 << sk
->sk_state
) & (TCPF_LISTEN
| TCPF_CLOSE
))
365 /* icmp should have updated the destination cache entry */
366 dst
= __sk_dst_check(sk
, np
->dst_cookie
);
369 struct inet_sock
*inet
= inet_sk(sk
);
372 /* BUGGG_FUTURE: Again, it is not clear how
373 to handle rthdr case. Ignore this complexity
376 memset(&fl
, 0, sizeof(fl
));
377 fl
.proto
= IPPROTO_TCP
;
378 ipv6_addr_copy(&fl
.fl6_dst
, &np
->daddr
);
379 ipv6_addr_copy(&fl
.fl6_src
, &np
->saddr
);
380 fl
.oif
= sk
->sk_bound_dev_if
;
381 fl
.fl_ip_dport
= inet
->dport
;
382 fl
.fl_ip_sport
= inet
->sport
;
383 security_skb_classify_flow(skb
, &fl
);
385 if ((err
= ip6_dst_lookup(sk
, &dst
, &fl
))) {
386 sk
->sk_err_soft
= -err
;
390 if ((err
= xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0) {
391 sk
->sk_err_soft
= -err
;
398 if (inet_csk(sk
)->icsk_pmtu_cookie
> dst_mtu(dst
)) {
399 tcp_sync_mss(sk
, dst_mtu(dst
));
400 tcp_simple_retransmit(sk
);
401 } /* else let the usual retransmit timer handle it */
406 icmpv6_err_convert(type
, code
, &err
);
408 /* Might be for an request_sock */
409 switch (sk
->sk_state
) {
410 struct request_sock
*req
, **prev
;
412 if (sock_owned_by_user(sk
))
415 req
= inet6_csk_search_req(sk
, &prev
, th
->dest
, &hdr
->daddr
,
416 &hdr
->saddr
, inet6_iif(skb
));
420 /* ICMPs are not backlogged, hence we cannot get
421 * an established socket here.
423 BUG_TRAP(req
->sk
== NULL
);
425 if (seq
!= tcp_rsk(req
)->snt_isn
) {
426 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS
);
430 inet_csk_reqsk_queue_drop(sk
, req
, prev
);
434 case TCP_SYN_RECV
: /* Cannot happen.
435 It can, it SYNs are crossed. --ANK */
436 if (!sock_owned_by_user(sk
)) {
438 sk
->sk_error_report(sk
); /* Wake people up to see the error (see connect in sock.c) */
442 sk
->sk_err_soft
= err
;
446 if (!sock_owned_by_user(sk
) && np
->recverr
) {
448 sk
->sk_error_report(sk
);
450 sk
->sk_err_soft
= err
;
458 static int tcp_v6_send_synack(struct sock
*sk
, struct request_sock
*req
,
459 struct dst_entry
*dst
)
461 struct inet6_request_sock
*treq
= inet6_rsk(req
);
462 struct ipv6_pinfo
*np
= inet6_sk(sk
);
463 struct sk_buff
* skb
;
464 struct ipv6_txoptions
*opt
= NULL
;
465 struct in6_addr
* final_p
= NULL
, final
;
469 memset(&fl
, 0, sizeof(fl
));
470 fl
.proto
= IPPROTO_TCP
;
471 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
472 ipv6_addr_copy(&fl
.fl6_src
, &treq
->loc_addr
);
473 fl
.fl6_flowlabel
= 0;
475 fl
.fl_ip_dport
= inet_rsk(req
)->rmt_port
;
476 fl
.fl_ip_sport
= inet_sk(sk
)->sport
;
477 security_req_classify_flow(req
, &fl
);
482 np
->rxopt
.bits
.osrcrt
== 2 &&
484 struct sk_buff
*pktopts
= treq
->pktopts
;
485 struct inet6_skb_parm
*rxopt
= IP6CB(pktopts
);
487 opt
= ipv6_invert_rthdr(sk
, (struct ipv6_rt_hdr
*)(pktopts
->nh
.raw
+ rxopt
->srcrt
));
490 if (opt
&& opt
->srcrt
) {
491 struct rt0_hdr
*rt0
= (struct rt0_hdr
*) opt
->srcrt
;
492 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
493 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
497 err
= ip6_dst_lookup(sk
, &dst
, &fl
);
501 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
502 if ((err
= xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0)
506 skb
= tcp_make_synack(sk
, dst
, req
);
508 struct tcphdr
*th
= skb
->h
.th
;
510 th
->check
= tcp_v6_check(th
, skb
->len
,
511 &treq
->loc_addr
, &treq
->rmt_addr
,
512 csum_partial((char *)th
, skb
->len
, skb
->csum
));
514 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
515 err
= ip6_xmit(sk
, skb
, &fl
, opt
, 0);
516 err
= net_xmit_eval(err
);
520 if (opt
&& opt
!= np
->opt
)
521 sock_kfree_s(sk
, opt
, opt
->tot_len
);
526 static void tcp_v6_reqsk_destructor(struct request_sock
*req
)
528 if (inet6_rsk(req
)->pktopts
)
529 kfree_skb(inet6_rsk(req
)->pktopts
);
532 #ifdef CONFIG_TCP_MD5SIG
533 static struct tcp_md5sig_key
*tcp_v6_md5_do_lookup(struct sock
*sk
,
534 struct in6_addr
*addr
)
536 struct tcp_sock
*tp
= tcp_sk(sk
);
541 if (!tp
->md5sig_info
|| !tp
->md5sig_info
->entries6
)
544 for (i
= 0; i
< tp
->md5sig_info
->entries6
; i
++) {
545 if (ipv6_addr_cmp(&tp
->md5sig_info
->keys6
[i
].addr
, addr
) == 0)
546 return (struct tcp_md5sig_key
*)&tp
->md5sig_info
->keys6
[i
];
551 static struct tcp_md5sig_key
*tcp_v6_md5_lookup(struct sock
*sk
,
552 struct sock
*addr_sk
)
554 return tcp_v6_md5_do_lookup(sk
, &inet6_sk(addr_sk
)->daddr
);
557 static struct tcp_md5sig_key
*tcp_v6_reqsk_md5_lookup(struct sock
*sk
,
558 struct request_sock
*req
)
560 return tcp_v6_md5_do_lookup(sk
, &inet6_rsk(req
)->rmt_addr
);
563 static int tcp_v6_md5_do_add(struct sock
*sk
, struct in6_addr
*peer
,
564 char *newkey
, u8 newkeylen
)
566 /* Add key to the list */
567 struct tcp6_md5sig_key
*key
;
568 struct tcp_sock
*tp
= tcp_sk(sk
);
569 struct tcp6_md5sig_key
*keys
;
571 key
= (struct tcp6_md5sig_key
*) tcp_v6_md5_do_lookup(sk
, peer
);
573 /* modify existing entry - just update that one */
576 key
->keylen
= newkeylen
;
578 /* reallocate new list if current one is full. */
579 if (!tp
->md5sig_info
) {
580 tp
->md5sig_info
= kzalloc(sizeof(*tp
->md5sig_info
), GFP_ATOMIC
);
581 if (!tp
->md5sig_info
) {
586 tcp_alloc_md5sig_pool();
587 if (tp
->md5sig_info
->alloced6
== tp
->md5sig_info
->entries6
) {
588 keys
= kmalloc((sizeof (tp
->md5sig_info
->keys6
[0]) *
589 (tp
->md5sig_info
->entries6
+ 1)), GFP_ATOMIC
);
592 tcp_free_md5sig_pool();
597 if (tp
->md5sig_info
->entries6
)
598 memmove(keys
, tp
->md5sig_info
->keys6
,
599 (sizeof (tp
->md5sig_info
->keys6
[0]) *
600 tp
->md5sig_info
->entries6
));
602 kfree(tp
->md5sig_info
->keys6
);
603 tp
->md5sig_info
->keys6
= keys
;
604 tp
->md5sig_info
->alloced6
++;
607 ipv6_addr_copy(&tp
->md5sig_info
->keys6
[tp
->md5sig_info
->entries6
].addr
,
609 tp
->md5sig_info
->keys6
[tp
->md5sig_info
->entries6
].key
= newkey
;
610 tp
->md5sig_info
->keys6
[tp
->md5sig_info
->entries6
].keylen
= newkeylen
;
612 tp
->md5sig_info
->entries6
++;
617 static int tcp_v6_md5_add_func(struct sock
*sk
, struct sock
*addr_sk
,
618 u8
*newkey
, __u8 newkeylen
)
620 return tcp_v6_md5_do_add(sk
, &inet6_sk(addr_sk
)->daddr
,
624 static int tcp_v6_md5_do_del(struct sock
*sk
, struct in6_addr
*peer
)
626 struct tcp_sock
*tp
= tcp_sk(sk
);
629 for (i
= 0; i
< tp
->md5sig_info
->entries6
; i
++) {
630 if (ipv6_addr_cmp(&tp
->md5sig_info
->keys6
[i
].addr
, peer
) == 0) {
632 kfree(tp
->md5sig_info
->keys6
[i
].key
);
633 tp
->md5sig_info
->entries6
--;
635 if (tp
->md5sig_info
->entries6
== 0) {
636 kfree(tp
->md5sig_info
->keys6
);
637 tp
->md5sig_info
->keys6
= NULL
;
639 tcp_free_md5sig_pool();
643 /* shrink the database */
644 if (tp
->md5sig_info
->entries6
!= i
)
645 memmove(&tp
->md5sig_info
->keys6
[i
],
646 &tp
->md5sig_info
->keys6
[i
+1],
647 (tp
->md5sig_info
->entries6
- i
)
648 * sizeof (tp
->md5sig_info
->keys6
[0]));
655 static void tcp_v6_clear_md5_list (struct sock
*sk
)
657 struct tcp_sock
*tp
= tcp_sk(sk
);
660 if (tp
->md5sig_info
->entries6
) {
661 for (i
= 0; i
< tp
->md5sig_info
->entries6
; i
++)
662 kfree(tp
->md5sig_info
->keys6
[i
].key
);
663 tp
->md5sig_info
->entries6
= 0;
664 tcp_free_md5sig_pool();
667 kfree(tp
->md5sig_info
->keys6
);
668 tp
->md5sig_info
->keys6
= NULL
;
669 tp
->md5sig_info
->alloced6
= 0;
671 if (tp
->md5sig_info
->entries4
) {
672 for (i
= 0; i
< tp
->md5sig_info
->entries4
; i
++)
673 kfree(tp
->md5sig_info
->keys4
[i
].key
);
674 tp
->md5sig_info
->entries4
= 0;
675 tcp_free_md5sig_pool();
678 kfree(tp
->md5sig_info
->keys4
);
679 tp
->md5sig_info
->keys4
= NULL
;
680 tp
->md5sig_info
->alloced4
= 0;
683 static int tcp_v6_parse_md5_keys (struct sock
*sk
, char __user
*optval
,
686 struct tcp_md5sig cmd
;
687 struct sockaddr_in6
*sin6
= (struct sockaddr_in6
*)&cmd
.tcpm_addr
;
690 if (optlen
< sizeof(cmd
))
693 if (copy_from_user(&cmd
, optval
, sizeof(cmd
)))
696 if (sin6
->sin6_family
!= AF_INET6
)
699 if (!cmd
.tcpm_keylen
) {
700 if (!tcp_sk(sk
)->md5sig_info
)
702 if (ipv6_addr_type(&sin6
->sin6_addr
) & IPV6_ADDR_MAPPED
)
703 return tcp_v4_md5_do_del(sk
, sin6
->sin6_addr
.s6_addr32
[3]);
704 return tcp_v6_md5_do_del(sk
, &sin6
->sin6_addr
);
707 if (cmd
.tcpm_keylen
> TCP_MD5SIG_MAXKEYLEN
)
710 if (!tcp_sk(sk
)->md5sig_info
) {
711 struct tcp_sock
*tp
= tcp_sk(sk
);
712 struct tcp_md5sig_info
*p
;
714 p
= kzalloc(sizeof(struct tcp_md5sig_info
), GFP_KERNEL
);
721 newkey
= kmalloc(cmd
.tcpm_keylen
, GFP_KERNEL
);
724 memcpy(newkey
, cmd
.tcpm_key
, cmd
.tcpm_keylen
);
725 if (ipv6_addr_type(&sin6
->sin6_addr
) & IPV6_ADDR_MAPPED
) {
726 return tcp_v4_md5_do_add(sk
, sin6
->sin6_addr
.s6_addr32
[3],
727 newkey
, cmd
.tcpm_keylen
);
729 return tcp_v6_md5_do_add(sk
, &sin6
->sin6_addr
, newkey
, cmd
.tcpm_keylen
);
732 static int tcp_v6_do_calc_md5_hash(char *md5_hash
, struct tcp_md5sig_key
*key
,
733 struct in6_addr
*saddr
,
734 struct in6_addr
*daddr
,
735 struct tcphdr
*th
, int protocol
,
738 struct scatterlist sg
[4];
742 struct tcp_md5sig_pool
*hp
;
743 struct tcp6_pseudohdr
*bp
;
744 struct hash_desc
*desc
;
746 unsigned int nbytes
= 0;
748 hp
= tcp_get_md5sig_pool();
750 printk(KERN_WARNING
"%s(): hash pool not found...\n", __FUNCTION__
);
751 goto clear_hash_noput
;
753 bp
= &hp
->md5_blk
.ip6
;
754 desc
= &hp
->md5_desc
;
756 /* 1. TCP pseudo-header (RFC2460) */
757 ipv6_addr_copy(&bp
->saddr
, saddr
);
758 ipv6_addr_copy(&bp
->daddr
, daddr
);
759 bp
->len
= htonl(tcplen
);
760 bp
->protocol
= htonl(protocol
);
762 sg_set_buf(&sg
[block
++], bp
, sizeof(*bp
));
763 nbytes
+= sizeof(*bp
);
765 /* 2. TCP header, excluding options */
768 sg_set_buf(&sg
[block
++], th
, sizeof(*th
));
769 nbytes
+= sizeof(*th
);
771 /* 3. TCP segment data (if any) */
772 data_len
= tcplen
- (th
->doff
<< 2);
774 u8
*data
= (u8
*)th
+ (th
->doff
<< 2);
775 sg_set_buf(&sg
[block
++], data
, data_len
);
780 sg_set_buf(&sg
[block
++], key
->key
, key
->keylen
);
781 nbytes
+= key
->keylen
;
783 /* Now store the hash into the packet */
784 err
= crypto_hash_init(desc
);
786 printk(KERN_WARNING
"%s(): hash_init failed\n", __FUNCTION__
);
789 err
= crypto_hash_update(desc
, sg
, nbytes
);
791 printk(KERN_WARNING
"%s(): hash_update failed\n", __FUNCTION__
);
794 err
= crypto_hash_final(desc
, md5_hash
);
796 printk(KERN_WARNING
"%s(): hash_final failed\n", __FUNCTION__
);
800 /* Reset header, and free up the crypto */
801 tcp_put_md5sig_pool();
806 tcp_put_md5sig_pool();
808 memset(md5_hash
, 0, 16);
812 static int tcp_v6_calc_md5_hash(char *md5_hash
, struct tcp_md5sig_key
*key
,
814 struct dst_entry
*dst
,
815 struct request_sock
*req
,
816 struct tcphdr
*th
, int protocol
,
819 struct in6_addr
*saddr
, *daddr
;
822 saddr
= &inet6_sk(sk
)->saddr
;
823 daddr
= &inet6_sk(sk
)->daddr
;
825 saddr
= &inet6_rsk(req
)->loc_addr
;
826 daddr
= &inet6_rsk(req
)->rmt_addr
;
828 return tcp_v6_do_calc_md5_hash(md5_hash
, key
,
830 th
, protocol
, tcplen
);
833 static int tcp_v6_inbound_md5_hash (struct sock
*sk
, struct sk_buff
*skb
)
835 __u8
*hash_location
= NULL
;
836 struct tcp_md5sig_key
*hash_expected
;
837 struct ipv6hdr
*ip6h
= skb
->nh
.ipv6h
;
838 struct tcphdr
*th
= skb
->h
.th
;
839 int length
= (th
->doff
<< 2) - sizeof (*th
);
844 hash_expected
= tcp_v6_md5_do_lookup(sk
, &ip6h
->saddr
);
846 /* If the TCP option is too short, we can short cut */
847 if (length
< TCPOLEN_MD5SIG
)
848 return hash_expected
? 1 : 0;
864 if (opsize
< 2 || opsize
> length
)
866 if (opcode
== TCPOPT_MD5SIG
) {
876 /* do we have a hash as expected? */
877 if (!hash_expected
) {
880 if (net_ratelimit()) {
881 printk(KERN_INFO
"MD5 Hash NOT expected but found "
882 "(" NIP6_FMT
", %u)->"
883 "(" NIP6_FMT
", %u)\n",
884 NIP6(ip6h
->saddr
), ntohs(th
->source
),
885 NIP6(ip6h
->daddr
), ntohs(th
->dest
));
890 if (!hash_location
) {
891 if (net_ratelimit()) {
892 printk(KERN_INFO
"MD5 Hash expected but NOT found "
893 "(" NIP6_FMT
", %u)->"
894 "(" NIP6_FMT
", %u)\n",
895 NIP6(ip6h
->saddr
), ntohs(th
->source
),
896 NIP6(ip6h
->daddr
), ntohs(th
->dest
));
901 /* check the signature */
902 genhash
= tcp_v6_do_calc_md5_hash(newhash
,
904 &ip6h
->saddr
, &ip6h
->daddr
,
907 if (genhash
|| memcmp(hash_location
, newhash
, 16) != 0) {
908 if (net_ratelimit()) {
909 printk(KERN_INFO
"MD5 Hash %s for "
910 "(" NIP6_FMT
", %u)->"
911 "(" NIP6_FMT
", %u)\n",
912 genhash
? "failed" : "mismatch",
913 NIP6(ip6h
->saddr
), ntohs(th
->source
),
914 NIP6(ip6h
->daddr
), ntohs(th
->dest
));
922 static struct request_sock_ops tcp6_request_sock_ops __read_mostly
= {
924 .obj_size
= sizeof(struct tcp6_request_sock
),
925 .rtx_syn_ack
= tcp_v6_send_synack
,
926 .send_ack
= tcp_v6_reqsk_send_ack
,
927 .destructor
= tcp_v6_reqsk_destructor
,
928 .send_reset
= tcp_v6_send_reset
931 struct tcp_request_sock_ops tcp_request_sock_ipv6_ops
= {
932 #ifdef CONFIG_TCP_MD5SIG
933 .md5_lookup
= tcp_v6_reqsk_md5_lookup
,
937 static struct timewait_sock_ops tcp6_timewait_sock_ops
= {
938 .twsk_obj_size
= sizeof(struct tcp6_timewait_sock
),
939 .twsk_unique
= tcp_twsk_unique
,
940 .twsk_destructor
= tcp_twsk_destructor
,
943 static void tcp_v6_send_check(struct sock
*sk
, int len
, struct sk_buff
*skb
)
945 struct ipv6_pinfo
*np
= inet6_sk(sk
);
946 struct tcphdr
*th
= skb
->h
.th
;
948 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
949 th
->check
= ~csum_ipv6_magic(&np
->saddr
, &np
->daddr
, len
, IPPROTO_TCP
, 0);
950 skb
->csum
= offsetof(struct tcphdr
, check
);
952 th
->check
= csum_ipv6_magic(&np
->saddr
, &np
->daddr
, len
, IPPROTO_TCP
,
953 csum_partial((char *)th
, th
->doff
<<2,
958 static int tcp_v6_gso_send_check(struct sk_buff
*skb
)
960 struct ipv6hdr
*ipv6h
;
963 if (!pskb_may_pull(skb
, sizeof(*th
)))
966 ipv6h
= skb
->nh
.ipv6h
;
970 th
->check
= ~csum_ipv6_magic(&ipv6h
->saddr
, &ipv6h
->daddr
, skb
->len
,
972 skb
->csum
= offsetof(struct tcphdr
, check
);
973 skb
->ip_summed
= CHECKSUM_PARTIAL
;
977 static void tcp_v6_send_reset(struct sock
*sk
, struct sk_buff
*skb
)
979 struct tcphdr
*th
= skb
->h
.th
, *t1
;
980 struct sk_buff
*buff
;
982 int tot_len
= sizeof(*th
);
983 #ifdef CONFIG_TCP_MD5SIG
984 struct tcp_md5sig_key
*key
;
990 if (!ipv6_unicast_destination(skb
))
993 #ifdef CONFIG_TCP_MD5SIG
995 key
= tcp_v6_md5_do_lookup(sk
, &skb
->nh
.ipv6h
->daddr
);
1000 tot_len
+= TCPOLEN_MD5SIG_ALIGNED
;
1004 * We need to grab some memory, and put together an RST,
1005 * and then put it into the queue to be sent.
1008 buff
= alloc_skb(MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
,
1013 skb_reserve(buff
, MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
);
1015 t1
= (struct tcphdr
*) skb_push(buff
, tot_len
);
1017 /* Swap the send and the receive. */
1018 memset(t1
, 0, sizeof(*t1
));
1019 t1
->dest
= th
->source
;
1020 t1
->source
= th
->dest
;
1021 t1
->doff
= tot_len
/ 4;
1025 t1
->seq
= th
->ack_seq
;
1028 t1
->ack_seq
= htonl(ntohl(th
->seq
) + th
->syn
+ th
->fin
1029 + skb
->len
- (th
->doff
<<2));
1032 #ifdef CONFIG_TCP_MD5SIG
1034 u32
*opt
= (u32
*)(t1
+ 1);
1035 opt
[0] = htonl((TCPOPT_NOP
<< 24) |
1036 (TCPOPT_NOP
<< 16) |
1037 (TCPOPT_MD5SIG
<< 8) |
1039 tcp_v6_do_calc_md5_hash((__u8
*)&opt
[1],
1041 &skb
->nh
.ipv6h
->daddr
,
1042 &skb
->nh
.ipv6h
->saddr
,
1048 buff
->csum
= csum_partial((char *)t1
, sizeof(*t1
), 0);
1050 memset(&fl
, 0, sizeof(fl
));
1051 ipv6_addr_copy(&fl
.fl6_dst
, &skb
->nh
.ipv6h
->saddr
);
1052 ipv6_addr_copy(&fl
.fl6_src
, &skb
->nh
.ipv6h
->daddr
);
1054 t1
->check
= csum_ipv6_magic(&fl
.fl6_src
, &fl
.fl6_dst
,
1055 sizeof(*t1
), IPPROTO_TCP
,
1058 fl
.proto
= IPPROTO_TCP
;
1059 fl
.oif
= inet6_iif(skb
);
1060 fl
.fl_ip_dport
= t1
->dest
;
1061 fl
.fl_ip_sport
= t1
->source
;
1062 security_skb_classify_flow(skb
, &fl
);
1064 /* sk = NULL, but it is safe for now. RST socket required. */
1065 if (!ip6_dst_lookup(NULL
, &buff
->dst
, &fl
)) {
1067 if (xfrm_lookup(&buff
->dst
, &fl
, NULL
, 0) >= 0) {
1068 ip6_xmit(tcp6_socket
->sk
, buff
, &fl
, NULL
, 0);
1069 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS
);
1070 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS
);
1078 static void tcp_v6_send_ack(struct tcp_timewait_sock
*tw
,
1079 struct sk_buff
*skb
, u32 seq
, u32 ack
, u32 win
, u32 ts
)
1081 struct tcphdr
*th
= skb
->h
.th
, *t1
;
1082 struct sk_buff
*buff
;
1084 int tot_len
= sizeof(struct tcphdr
);
1086 #ifdef CONFIG_TCP_MD5SIG
1087 struct tcp_md5sig_key
*key
;
1088 struct tcp_md5sig_key tw_key
;
1091 #ifdef CONFIG_TCP_MD5SIG
1092 if (!tw
&& skb
->sk
) {
1093 key
= tcp_v6_md5_do_lookup(skb
->sk
, &skb
->nh
.ipv6h
->daddr
);
1094 } else if (tw
&& tw
->tw_md5_keylen
) {
1095 tw_key
.key
= tw
->tw_md5_key
;
1096 tw_key
.keylen
= tw
->tw_md5_keylen
;
1104 tot_len
+= TCPOLEN_TSTAMP_ALIGNED
;
1105 #ifdef CONFIG_TCP_MD5SIG
1107 tot_len
+= TCPOLEN_MD5SIG_ALIGNED
;
1110 buff
= alloc_skb(MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
,
1115 skb_reserve(buff
, MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
);
1117 t1
= (struct tcphdr
*) skb_push(buff
,tot_len
);
1119 /* Swap the send and the receive. */
1120 memset(t1
, 0, sizeof(*t1
));
1121 t1
->dest
= th
->source
;
1122 t1
->source
= th
->dest
;
1123 t1
->doff
= tot_len
/4;
1124 t1
->seq
= htonl(seq
);
1125 t1
->ack_seq
= htonl(ack
);
1127 t1
->window
= htons(win
);
1129 topt
= (u32
*)(t1
+ 1);
1132 *topt
++ = htonl((TCPOPT_NOP
<< 24) | (TCPOPT_NOP
<< 16) |
1133 (TCPOPT_TIMESTAMP
<< 8) | TCPOLEN_TIMESTAMP
);
1134 *topt
++ = htonl(tcp_time_stamp
);
1138 #ifdef CONFIG_TCP_MD5SIG
1140 *topt
++ = htonl((TCPOPT_NOP
<< 24) | (TCPOPT_NOP
<< 16) |
1141 (TCPOPT_MD5SIG
<< 8) | TCPOLEN_MD5SIG
);
1142 tcp_v6_do_calc_md5_hash((__u8
*)topt
,
1144 &skb
->nh
.ipv6h
->daddr
,
1145 &skb
->nh
.ipv6h
->saddr
,
1151 buff
->csum
= csum_partial((char *)t1
, tot_len
, 0);
1153 memset(&fl
, 0, sizeof(fl
));
1154 ipv6_addr_copy(&fl
.fl6_dst
, &skb
->nh
.ipv6h
->saddr
);
1155 ipv6_addr_copy(&fl
.fl6_src
, &skb
->nh
.ipv6h
->daddr
);
1157 t1
->check
= csum_ipv6_magic(&fl
.fl6_src
, &fl
.fl6_dst
,
1158 tot_len
, IPPROTO_TCP
,
1161 fl
.proto
= IPPROTO_TCP
;
1162 fl
.oif
= inet6_iif(skb
);
1163 fl
.fl_ip_dport
= t1
->dest
;
1164 fl
.fl_ip_sport
= t1
->source
;
1165 security_skb_classify_flow(skb
, &fl
);
1167 if (!ip6_dst_lookup(NULL
, &buff
->dst
, &fl
)) {
1168 if (xfrm_lookup(&buff
->dst
, &fl
, NULL
, 0) >= 0) {
1169 ip6_xmit(tcp6_socket
->sk
, buff
, &fl
, NULL
, 0);
1170 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS
);
1178 static void tcp_v6_timewait_ack(struct sock
*sk
, struct sk_buff
*skb
)
1180 struct inet_timewait_sock
*tw
= inet_twsk(sk
);
1181 struct tcp_timewait_sock
*tcptw
= tcp_twsk(sk
);
1183 tcp_v6_send_ack(tcptw
, skb
, tcptw
->tw_snd_nxt
, tcptw
->tw_rcv_nxt
,
1184 tcptw
->tw_rcv_wnd
>> tw
->tw_rcv_wscale
,
1185 tcptw
->tw_ts_recent
);
1190 static void tcp_v6_reqsk_send_ack(struct sk_buff
*skb
, struct request_sock
*req
)
1192 tcp_v6_send_ack(NULL
, skb
, tcp_rsk(req
)->snt_isn
+ 1, tcp_rsk(req
)->rcv_isn
+ 1, req
->rcv_wnd
, req
->ts_recent
);
1196 static struct sock
*tcp_v6_hnd_req(struct sock
*sk
,struct sk_buff
*skb
)
1198 struct request_sock
*req
, **prev
;
1199 const struct tcphdr
*th
= skb
->h
.th
;
1202 /* Find possible connection requests. */
1203 req
= inet6_csk_search_req(sk
, &prev
, th
->source
,
1204 &skb
->nh
.ipv6h
->saddr
,
1205 &skb
->nh
.ipv6h
->daddr
, inet6_iif(skb
));
1207 return tcp_check_req(sk
, skb
, req
, prev
);
1209 nsk
= __inet6_lookup_established(&tcp_hashinfo
, &skb
->nh
.ipv6h
->saddr
,
1210 th
->source
, &skb
->nh
.ipv6h
->daddr
,
1211 ntohs(th
->dest
), inet6_iif(skb
));
1214 if (nsk
->sk_state
!= TCP_TIME_WAIT
) {
1218 inet_twsk_put(inet_twsk(nsk
));
1222 #if 0 /*def CONFIG_SYN_COOKIES*/
1223 if (!th
->rst
&& !th
->syn
&& th
->ack
)
1224 sk
= cookie_v6_check(sk
, skb
, &(IPCB(skb
)->opt
));
1229 /* FIXME: this is substantially similar to the ipv4 code.
1230 * Can some kind of merge be done? -- erics
1232 static int tcp_v6_conn_request(struct sock
*sk
, struct sk_buff
*skb
)
1234 struct inet6_request_sock
*treq
;
1235 struct ipv6_pinfo
*np
= inet6_sk(sk
);
1236 struct tcp_options_received tmp_opt
;
1237 struct tcp_sock
*tp
= tcp_sk(sk
);
1238 struct request_sock
*req
= NULL
;
1239 __u32 isn
= TCP_SKB_CB(skb
)->when
;
1241 if (skb
->protocol
== htons(ETH_P_IP
))
1242 return tcp_v4_conn_request(sk
, skb
);
1244 if (!ipv6_unicast_destination(skb
))
1248 * There are no SYN attacks on IPv6, yet...
1250 if (inet_csk_reqsk_queue_is_full(sk
) && !isn
) {
1251 if (net_ratelimit())
1252 printk(KERN_INFO
"TCPv6: dropping request, synflood is possible\n");
1256 if (sk_acceptq_is_full(sk
) && inet_csk_reqsk_queue_young(sk
) > 1)
1259 req
= inet6_reqsk_alloc(&tcp6_request_sock_ops
);
1263 #ifdef CONFIG_TCP_MD5SIG
1264 tcp_rsk(req
)->af_specific
= &tcp_request_sock_ipv6_ops
;
1267 tcp_clear_options(&tmp_opt
);
1268 tmp_opt
.mss_clamp
= IPV6_MIN_MTU
- sizeof(struct tcphdr
) - sizeof(struct ipv6hdr
);
1269 tmp_opt
.user_mss
= tp
->rx_opt
.user_mss
;
1271 tcp_parse_options(skb
, &tmp_opt
, 0);
1273 tmp_opt
.tstamp_ok
= tmp_opt
.saw_tstamp
;
1274 tcp_openreq_init(req
, &tmp_opt
, skb
);
1276 treq
= inet6_rsk(req
);
1277 ipv6_addr_copy(&treq
->rmt_addr
, &skb
->nh
.ipv6h
->saddr
);
1278 ipv6_addr_copy(&treq
->loc_addr
, &skb
->nh
.ipv6h
->daddr
);
1279 TCP_ECN_create_request(req
, skb
->h
.th
);
1280 treq
->pktopts
= NULL
;
1281 if (ipv6_opt_accepted(sk
, skb
) ||
1282 np
->rxopt
.bits
.rxinfo
|| np
->rxopt
.bits
.rxoinfo
||
1283 np
->rxopt
.bits
.rxhlim
|| np
->rxopt
.bits
.rxohlim
) {
1284 atomic_inc(&skb
->users
);
1285 treq
->pktopts
= skb
;
1287 treq
->iif
= sk
->sk_bound_dev_if
;
1289 /* So that link locals have meaning */
1290 if (!sk
->sk_bound_dev_if
&&
1291 ipv6_addr_type(&treq
->rmt_addr
) & IPV6_ADDR_LINKLOCAL
)
1292 treq
->iif
= inet6_iif(skb
);
1295 isn
= tcp_v6_init_sequence(skb
);
1297 tcp_rsk(req
)->snt_isn
= isn
;
1299 security_inet_conn_request(sk
, skb
, req
);
1301 if (tcp_v6_send_synack(sk
, req
, NULL
))
1304 inet6_csk_reqsk_queue_hash_add(sk
, req
, TCP_TIMEOUT_INIT
);
1311 return 0; /* don't send reset */
1314 static struct sock
* tcp_v6_syn_recv_sock(struct sock
*sk
, struct sk_buff
*skb
,
1315 struct request_sock
*req
,
1316 struct dst_entry
*dst
)
1318 struct inet6_request_sock
*treq
= inet6_rsk(req
);
1319 struct ipv6_pinfo
*newnp
, *np
= inet6_sk(sk
);
1320 struct tcp6_sock
*newtcp6sk
;
1321 struct inet_sock
*newinet
;
1322 struct tcp_sock
*newtp
;
1324 struct ipv6_txoptions
*opt
;
1325 #ifdef CONFIG_TCP_MD5SIG
1326 struct tcp_md5sig_key
*key
;
1329 if (skb
->protocol
== htons(ETH_P_IP
)) {
1334 newsk
= tcp_v4_syn_recv_sock(sk
, skb
, req
, dst
);
1339 newtcp6sk
= (struct tcp6_sock
*)newsk
;
1340 inet_sk(newsk
)->pinet6
= &newtcp6sk
->inet6
;
1342 newinet
= inet_sk(newsk
);
1343 newnp
= inet6_sk(newsk
);
1344 newtp
= tcp_sk(newsk
);
1346 memcpy(newnp
, np
, sizeof(struct ipv6_pinfo
));
1348 ipv6_addr_set(&newnp
->daddr
, 0, 0, htonl(0x0000FFFF),
1351 ipv6_addr_set(&newnp
->saddr
, 0, 0, htonl(0x0000FFFF),
1354 ipv6_addr_copy(&newnp
->rcv_saddr
, &newnp
->saddr
);
1356 inet_csk(newsk
)->icsk_af_ops
= &ipv6_mapped
;
1357 newsk
->sk_backlog_rcv
= tcp_v4_do_rcv
;
1358 #ifdef CONFIG_TCP_MD5SIG
1359 newtp
->af_specific
= &tcp_sock_ipv6_mapped_specific
;
1362 newnp
->pktoptions
= NULL
;
1364 newnp
->mcast_oif
= inet6_iif(skb
);
1365 newnp
->mcast_hops
= skb
->nh
.ipv6h
->hop_limit
;
1368 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1369 * here, tcp_create_openreq_child now does this for us, see the comment in
1370 * that function for the gory details. -acme
1373 /* It is tricky place. Until this moment IPv4 tcp
1374 worked with IPv6 icsk.icsk_af_ops.
1377 tcp_sync_mss(newsk
, inet_csk(newsk
)->icsk_pmtu_cookie
);
1384 if (sk_acceptq_is_full(sk
))
1387 if (np
->rxopt
.bits
.osrcrt
== 2 &&
1388 opt
== NULL
&& treq
->pktopts
) {
1389 struct inet6_skb_parm
*rxopt
= IP6CB(treq
->pktopts
);
1391 opt
= ipv6_invert_rthdr(sk
, (struct ipv6_rt_hdr
*)(treq
->pktopts
->nh
.raw
+ rxopt
->srcrt
));
1395 struct in6_addr
*final_p
= NULL
, final
;
1398 memset(&fl
, 0, sizeof(fl
));
1399 fl
.proto
= IPPROTO_TCP
;
1400 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
1401 if (opt
&& opt
->srcrt
) {
1402 struct rt0_hdr
*rt0
= (struct rt0_hdr
*) opt
->srcrt
;
1403 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
1404 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
1407 ipv6_addr_copy(&fl
.fl6_src
, &treq
->loc_addr
);
1408 fl
.oif
= sk
->sk_bound_dev_if
;
1409 fl
.fl_ip_dport
= inet_rsk(req
)->rmt_port
;
1410 fl
.fl_ip_sport
= inet_sk(sk
)->sport
;
1411 security_req_classify_flow(req
, &fl
);
1413 if (ip6_dst_lookup(sk
, &dst
, &fl
))
1417 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
1419 if ((xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0)
1423 newsk
= tcp_create_openreq_child(sk
, req
, skb
);
1428 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1429 * count here, tcp_create_openreq_child now does this for us, see the
1430 * comment in that function for the gory details. -acme
1433 newsk
->sk_gso_type
= SKB_GSO_TCPV6
;
1434 __ip6_dst_store(newsk
, dst
, NULL
, NULL
);
1436 newtcp6sk
= (struct tcp6_sock
*)newsk
;
1437 inet_sk(newsk
)->pinet6
= &newtcp6sk
->inet6
;
1439 newtp
= tcp_sk(newsk
);
1440 newinet
= inet_sk(newsk
);
1441 newnp
= inet6_sk(newsk
);
1443 memcpy(newnp
, np
, sizeof(struct ipv6_pinfo
));
1445 ipv6_addr_copy(&newnp
->daddr
, &treq
->rmt_addr
);
1446 ipv6_addr_copy(&newnp
->saddr
, &treq
->loc_addr
);
1447 ipv6_addr_copy(&newnp
->rcv_saddr
, &treq
->loc_addr
);
1448 newsk
->sk_bound_dev_if
= treq
->iif
;
1450 /* Now IPv6 options...
1452 First: no IPv4 options.
1454 newinet
->opt
= NULL
;
1457 newnp
->rxopt
.all
= np
->rxopt
.all
;
1459 /* Clone pktoptions received with SYN */
1460 newnp
->pktoptions
= NULL
;
1461 if (treq
->pktopts
!= NULL
) {
1462 newnp
->pktoptions
= skb_clone(treq
->pktopts
, GFP_ATOMIC
);
1463 kfree_skb(treq
->pktopts
);
1464 treq
->pktopts
= NULL
;
1465 if (newnp
->pktoptions
)
1466 skb_set_owner_r(newnp
->pktoptions
, newsk
);
1469 newnp
->mcast_oif
= inet6_iif(skb
);
1470 newnp
->mcast_hops
= skb
->nh
.ipv6h
->hop_limit
;
1472 /* Clone native IPv6 options from listening socket (if any)
1474 Yes, keeping reference count would be much more clever,
1475 but we make one more one thing there: reattach optmem
1479 newnp
->opt
= ipv6_dup_options(newsk
, opt
);
1481 sock_kfree_s(sk
, opt
, opt
->tot_len
);
1484 inet_csk(newsk
)->icsk_ext_hdr_len
= 0;
1486 inet_csk(newsk
)->icsk_ext_hdr_len
= (newnp
->opt
->opt_nflen
+
1487 newnp
->opt
->opt_flen
);
1489 tcp_mtup_init(newsk
);
1490 tcp_sync_mss(newsk
, dst_mtu(dst
));
1491 newtp
->advmss
= dst_metric(dst
, RTAX_ADVMSS
);
1492 tcp_initialize_rcv_mss(newsk
);
1494 newinet
->daddr
= newinet
->saddr
= newinet
->rcv_saddr
= LOOPBACK4_IPV6
;
1496 #ifdef CONFIG_TCP_MD5SIG
1497 /* Copy over the MD5 key from the original socket */
1498 if ((key
= tcp_v6_md5_do_lookup(sk
, &newnp
->daddr
)) != NULL
) {
1499 /* We're using one, so create a matching key
1500 * on the newsk structure. If we fail to get
1501 * memory, then we end up not copying the key
1504 char *newkey
= kmalloc(key
->keylen
, GFP_ATOMIC
);
1506 memcpy(newkey
, key
->key
, key
->keylen
);
1507 tcp_v6_md5_do_add(newsk
, &inet6_sk(sk
)->daddr
,
1508 newkey
, key
->keylen
);
1513 __inet6_hash(&tcp_hashinfo
, newsk
);
1514 inet_inherit_port(&tcp_hashinfo
, sk
, newsk
);
1519 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS
);
1521 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS
);
1522 if (opt
&& opt
!= np
->opt
)
1523 sock_kfree_s(sk
, opt
, opt
->tot_len
);
1528 static int tcp_v6_checksum_init(struct sk_buff
*skb
)
1530 if (skb
->ip_summed
== CHECKSUM_COMPLETE
) {
1531 if (!tcp_v6_check(skb
->h
.th
,skb
->len
,&skb
->nh
.ipv6h
->saddr
,
1532 &skb
->nh
.ipv6h
->daddr
,skb
->csum
)) {
1533 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1538 skb
->csum
= ~tcp_v6_check(skb
->h
.th
,skb
->len
,&skb
->nh
.ipv6h
->saddr
,
1539 &skb
->nh
.ipv6h
->daddr
, 0);
1541 if (skb
->len
<= 76) {
1542 return __skb_checksum_complete(skb
);
1547 /* The socket must have it's spinlock held when we get
1550 * We have a potential double-lock case here, so even when
1551 * doing backlog processing we use the BH locking scheme.
1552 * This is because we cannot sleep with the original spinlock
1555 static int tcp_v6_do_rcv(struct sock
*sk
, struct sk_buff
*skb
)
1557 struct ipv6_pinfo
*np
= inet6_sk(sk
);
1558 struct tcp_sock
*tp
;
1559 struct sk_buff
*opt_skb
= NULL
;
1561 /* Imagine: socket is IPv6. IPv4 packet arrives,
1562 goes to IPv4 receive handler and backlogged.
1563 From backlog it always goes here. Kerboom...
1564 Fortunately, tcp_rcv_established and rcv_established
1565 handle them correctly, but it is not case with
1566 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1569 if (skb
->protocol
== htons(ETH_P_IP
))
1570 return tcp_v4_do_rcv(sk
, skb
);
1572 #ifdef CONFIG_TCP_MD5SIG
1573 if (tcp_v6_inbound_md5_hash (sk
, skb
))
1577 if (sk_filter(sk
, skb
))
1581 * socket locking is here for SMP purposes as backlog rcv
1582 * is currently called with bh processing disabled.
1585 /* Do Stevens' IPV6_PKTOPTIONS.
1587 Yes, guys, it is the only place in our code, where we
1588 may make it not affecting IPv4.
1589 The rest of code is protocol independent,
1590 and I do not like idea to uglify IPv4.
1592 Actually, all the idea behind IPV6_PKTOPTIONS
1593 looks not very well thought. For now we latch
1594 options, received in the last packet, enqueued
1595 by tcp. Feel free to propose better solution.
1599 opt_skb
= skb_clone(skb
, GFP_ATOMIC
);
1601 if (sk
->sk_state
== TCP_ESTABLISHED
) { /* Fast path */
1602 TCP_CHECK_TIMER(sk
);
1603 if (tcp_rcv_established(sk
, skb
, skb
->h
.th
, skb
->len
))
1605 TCP_CHECK_TIMER(sk
);
1607 goto ipv6_pktoptions
;
1611 if (skb
->len
< (skb
->h
.th
->doff
<<2) || tcp_checksum_complete(skb
))
1614 if (sk
->sk_state
== TCP_LISTEN
) {
1615 struct sock
*nsk
= tcp_v6_hnd_req(sk
, skb
);
1620 * Queue it on the new socket if the new socket is active,
1621 * otherwise we just shortcircuit this and continue with
1625 if (tcp_child_process(sk
, nsk
, skb
))
1628 __kfree_skb(opt_skb
);
1633 TCP_CHECK_TIMER(sk
);
1634 if (tcp_rcv_state_process(sk
, skb
, skb
->h
.th
, skb
->len
))
1636 TCP_CHECK_TIMER(sk
);
1638 goto ipv6_pktoptions
;
1642 tcp_v6_send_reset(sk
, skb
);
1645 __kfree_skb(opt_skb
);
1649 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1654 /* Do you ask, what is it?
1656 1. skb was enqueued by tcp.
1657 2. skb is added to tail of read queue, rather than out of order.
1658 3. socket is not in passive state.
1659 4. Finally, it really contains options, which user wants to receive.
1662 if (TCP_SKB_CB(opt_skb
)->end_seq
== tp
->rcv_nxt
&&
1663 !((1 << sk
->sk_state
) & (TCPF_CLOSE
| TCPF_LISTEN
))) {
1664 if (np
->rxopt
.bits
.rxinfo
|| np
->rxopt
.bits
.rxoinfo
)
1665 np
->mcast_oif
= inet6_iif(opt_skb
);
1666 if (np
->rxopt
.bits
.rxhlim
|| np
->rxopt
.bits
.rxohlim
)
1667 np
->mcast_hops
= opt_skb
->nh
.ipv6h
->hop_limit
;
1668 if (ipv6_opt_accepted(sk
, opt_skb
)) {
1669 skb_set_owner_r(opt_skb
, sk
);
1670 opt_skb
= xchg(&np
->pktoptions
, opt_skb
);
1672 __kfree_skb(opt_skb
);
1673 opt_skb
= xchg(&np
->pktoptions
, NULL
);
1682 static int tcp_v6_rcv(struct sk_buff
**pskb
)
1684 struct sk_buff
*skb
= *pskb
;
1689 if (skb
->pkt_type
!= PACKET_HOST
)
1693 * Count it even if it's bad.
1695 TCP_INC_STATS_BH(TCP_MIB_INSEGS
);
1697 if (!pskb_may_pull(skb
, sizeof(struct tcphdr
)))
1702 if (th
->doff
< sizeof(struct tcphdr
)/4)
1704 if (!pskb_may_pull(skb
, th
->doff
*4))
1707 if ((skb
->ip_summed
!= CHECKSUM_UNNECESSARY
&&
1708 tcp_v6_checksum_init(skb
)))
1712 TCP_SKB_CB(skb
)->seq
= ntohl(th
->seq
);
1713 TCP_SKB_CB(skb
)->end_seq
= (TCP_SKB_CB(skb
)->seq
+ th
->syn
+ th
->fin
+
1714 skb
->len
- th
->doff
*4);
1715 TCP_SKB_CB(skb
)->ack_seq
= ntohl(th
->ack_seq
);
1716 TCP_SKB_CB(skb
)->when
= 0;
1717 TCP_SKB_CB(skb
)->flags
= ipv6_get_dsfield(skb
->nh
.ipv6h
);
1718 TCP_SKB_CB(skb
)->sacked
= 0;
1720 sk
= __inet6_lookup(&tcp_hashinfo
, &skb
->nh
.ipv6h
->saddr
, th
->source
,
1721 &skb
->nh
.ipv6h
->daddr
, ntohs(th
->dest
),
1728 if (sk
->sk_state
== TCP_TIME_WAIT
)
1731 if (!xfrm6_policy_check(sk
, XFRM_POLICY_IN
, skb
))
1732 goto discard_and_relse
;
1734 if (sk_filter(sk
, skb
))
1735 goto discard_and_relse
;
1739 bh_lock_sock_nested(sk
);
1741 if (!sock_owned_by_user(sk
)) {
1742 #ifdef CONFIG_NET_DMA
1743 struct tcp_sock
*tp
= tcp_sk(sk
);
1744 if (tp
->ucopy
.dma_chan
)
1745 ret
= tcp_v6_do_rcv(sk
, skb
);
1749 if (!tcp_prequeue(sk
, skb
))
1750 ret
= tcp_v6_do_rcv(sk
, skb
);
1753 sk_add_backlog(sk
, skb
);
1757 return ret
? -1 : 0;
1760 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
))
1763 if (skb
->len
< (th
->doff
<<2) || tcp_checksum_complete(skb
)) {
1765 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1767 tcp_v6_send_reset(NULL
, skb
);
1784 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
)) {
1785 inet_twsk_put(inet_twsk(sk
));
1789 if (skb
->len
< (th
->doff
<<2) || tcp_checksum_complete(skb
)) {
1790 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1791 inet_twsk_put(inet_twsk(sk
));
1795 switch (tcp_timewait_state_process(inet_twsk(sk
), skb
, th
)) {
1800 sk2
= inet6_lookup_listener(&tcp_hashinfo
,
1801 &skb
->nh
.ipv6h
->daddr
,
1802 ntohs(th
->dest
), inet6_iif(skb
));
1804 struct inet_timewait_sock
*tw
= inet_twsk(sk
);
1805 inet_twsk_deschedule(tw
, &tcp_death_row
);
1810 /* Fall through to ACK */
1813 tcp_v6_timewait_ack(sk
, skb
);
1817 case TCP_TW_SUCCESS
:;
/* Timestamp remembering for IPv6 peers is not implemented; always
 * report "nothing remembered" (0).
 */
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}
1828 static struct inet_connection_sock_af_ops ipv6_specific
= {
1829 .queue_xmit
= inet6_csk_xmit
,
1830 .send_check
= tcp_v6_send_check
,
1831 .rebuild_header
= inet6_sk_rebuild_header
,
1832 .conn_request
= tcp_v6_conn_request
,
1833 .syn_recv_sock
= tcp_v6_syn_recv_sock
,
1834 .remember_stamp
= tcp_v6_remember_stamp
,
1835 .net_header_len
= sizeof(struct ipv6hdr
),
1836 .setsockopt
= ipv6_setsockopt
,
1837 .getsockopt
= ipv6_getsockopt
,
1838 .addr2sockaddr
= inet6_csk_addr2sockaddr
,
1839 .sockaddr_len
= sizeof(struct sockaddr_in6
),
1840 #ifdef CONFIG_COMPAT
1841 .compat_setsockopt
= compat_ipv6_setsockopt
,
1842 .compat_getsockopt
= compat_ipv6_getsockopt
,
1846 static struct tcp_sock_af_ops tcp_sock_ipv6_specific
= {
1847 #ifdef CONFIG_TCP_MD5SIG
1848 .md5_lookup
= tcp_v6_md5_lookup
,
1849 .calc_md5_hash
= tcp_v6_calc_md5_hash
,
1850 .md5_add
= tcp_v6_md5_add_func
,
1851 .md5_parse
= tcp_v6_parse_md5_keys
,
1856 * TCP over IPv4 via INET6 API
1859 static struct inet_connection_sock_af_ops ipv6_mapped
= {
1860 .queue_xmit
= ip_queue_xmit
,
1861 .send_check
= tcp_v4_send_check
,
1862 .rebuild_header
= inet_sk_rebuild_header
,
1863 .conn_request
= tcp_v6_conn_request
,
1864 .syn_recv_sock
= tcp_v6_syn_recv_sock
,
1865 .remember_stamp
= tcp_v4_remember_stamp
,
1866 .net_header_len
= sizeof(struct iphdr
),
1867 .setsockopt
= ipv6_setsockopt
,
1868 .getsockopt
= ipv6_getsockopt
,
1869 .addr2sockaddr
= inet6_csk_addr2sockaddr
,
1870 .sockaddr_len
= sizeof(struct sockaddr_in6
),
1871 #ifdef CONFIG_COMPAT
1872 .compat_setsockopt
= compat_ipv6_setsockopt
,
1873 .compat_getsockopt
= compat_ipv6_getsockopt
,
1877 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific
= {
1878 #ifdef CONFIG_TCP_MD5SIG
1879 .md5_lookup
= tcp_v4_md5_lookup
,
1880 .calc_md5_hash
= tcp_v4_calc_md5_hash
,
1881 .md5_add
= tcp_v6_md5_add_func
,
1882 .md5_parse
= tcp_v6_parse_md5_keys
,
1886 /* NOTE: A lot of things set to zero explicitly by call to
1887 * sk_alloc() so need not be done here.
1889 static int tcp_v6_init_sock(struct sock
*sk
)
1891 struct inet_connection_sock
*icsk
= inet_csk(sk
);
1892 struct tcp_sock
*tp
= tcp_sk(sk
);
1894 skb_queue_head_init(&tp
->out_of_order_queue
);
1895 tcp_init_xmit_timers(sk
);
1896 tcp_prequeue_init(tp
);
1898 icsk
->icsk_rto
= TCP_TIMEOUT_INIT
;
1899 tp
->mdev
= TCP_TIMEOUT_INIT
;
1901 /* So many TCP implementations out there (incorrectly) count the
1902 * initial SYN frame in their delayed-ACK and congestion control
1903 * algorithms that we must have the following bandaid to talk
1904 * efficiently to them. -DaveM
1908 /* See draft-stevens-tcpca-spec-01 for discussion of the
1909 * initialization of these values.
1911 tp
->snd_ssthresh
= 0x7fffffff;
1912 tp
->snd_cwnd_clamp
= ~0;
1913 tp
->mss_cache
= 536;
1915 tp
->reordering
= sysctl_tcp_reordering
;
1917 sk
->sk_state
= TCP_CLOSE
;
1919 icsk
->icsk_af_ops
= &ipv6_specific
;
1920 icsk
->icsk_ca_ops
= &tcp_init_congestion_ops
;
1921 icsk
->icsk_sync_mss
= tcp_sync_mss
;
1922 sk
->sk_write_space
= sk_stream_write_space
;
1923 sock_set_flag(sk
, SOCK_USE_WRITE_QUEUE
);
1925 #ifdef CONFIG_TCP_MD5SIG
1926 tp
->af_specific
= &tcp_sock_ipv6_specific
;
1929 sk
->sk_sndbuf
= sysctl_tcp_wmem
[1];
1930 sk
->sk_rcvbuf
= sysctl_tcp_rmem
[1];
1932 atomic_inc(&tcp_sockets_allocated
);
/* Destructor for an IPv6 TCP socket: release MD5 keys (if configured),
 * run the shared IPv4/TCP teardown, then the generic inet6 teardown.
 */
static int tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}
1948 /* Proc filesystem TCPv6 sock list dumping. */
1949 static void get_openreq6(struct seq_file
*seq
,
1950 struct sock
*sk
, struct request_sock
*req
, int i
, int uid
)
1952 int ttd
= req
->expires
- jiffies
;
1953 struct in6_addr
*src
= &inet6_rsk(req
)->loc_addr
;
1954 struct in6_addr
*dest
= &inet6_rsk(req
)->rmt_addr
;
1960 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1961 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1963 src
->s6_addr32
[0], src
->s6_addr32
[1],
1964 src
->s6_addr32
[2], src
->s6_addr32
[3],
1965 ntohs(inet_sk(sk
)->sport
),
1966 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
1967 dest
->s6_addr32
[2], dest
->s6_addr32
[3],
1968 ntohs(inet_rsk(req
)->rmt_port
),
1970 0,0, /* could print option size, but that is af dependent. */
1971 1, /* timers active (only the expire timer) */
1972 jiffies_to_clock_t(ttd
),
1975 0, /* non standard timer */
1976 0, /* open_requests have no inode */
1980 static void get_tcp6_sock(struct seq_file
*seq
, struct sock
*sp
, int i
)
1982 struct in6_addr
*dest
, *src
;
1985 unsigned long timer_expires
;
1986 struct inet_sock
*inet
= inet_sk(sp
);
1987 struct tcp_sock
*tp
= tcp_sk(sp
);
1988 const struct inet_connection_sock
*icsk
= inet_csk(sp
);
1989 struct ipv6_pinfo
*np
= inet6_sk(sp
);
1992 src
= &np
->rcv_saddr
;
1993 destp
= ntohs(inet
->dport
);
1994 srcp
= ntohs(inet
->sport
);
1996 if (icsk
->icsk_pending
== ICSK_TIME_RETRANS
) {
1998 timer_expires
= icsk
->icsk_timeout
;
1999 } else if (icsk
->icsk_pending
== ICSK_TIME_PROBE0
) {
2001 timer_expires
= icsk
->icsk_timeout
;
2002 } else if (timer_pending(&sp
->sk_timer
)) {
2004 timer_expires
= sp
->sk_timer
.expires
;
2007 timer_expires
= jiffies
;
2011 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2012 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
2014 src
->s6_addr32
[0], src
->s6_addr32
[1],
2015 src
->s6_addr32
[2], src
->s6_addr32
[3], srcp
,
2016 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
2017 dest
->s6_addr32
[2], dest
->s6_addr32
[3], destp
,
2019 tp
->write_seq
-tp
->snd_una
,
2020 (sp
->sk_state
== TCP_LISTEN
) ? sp
->sk_ack_backlog
: (tp
->rcv_nxt
- tp
->copied_seq
),
2022 jiffies_to_clock_t(timer_expires
- jiffies
),
2023 icsk
->icsk_retransmits
,
2025 icsk
->icsk_probes_out
,
2027 atomic_read(&sp
->sk_refcnt
), sp
,
2030 (icsk
->icsk_ack
.quick
<< 1 ) | icsk
->icsk_ack
.pingpong
,
2031 tp
->snd_cwnd
, tp
->snd_ssthresh
>=0xFFFF?-1:tp
->snd_ssthresh
2035 static void get_timewait6_sock(struct seq_file
*seq
,
2036 struct inet_timewait_sock
*tw
, int i
)
2038 struct in6_addr
*dest
, *src
;
2040 struct inet6_timewait_sock
*tw6
= inet6_twsk((struct sock
*)tw
);
2041 int ttd
= tw
->tw_ttd
- jiffies
;
2046 dest
= &tw6
->tw_v6_daddr
;
2047 src
= &tw6
->tw_v6_rcv_saddr
;
2048 destp
= ntohs(tw
->tw_dport
);
2049 srcp
= ntohs(tw
->tw_sport
);
2052 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2053 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2055 src
->s6_addr32
[0], src
->s6_addr32
[1],
2056 src
->s6_addr32
[2], src
->s6_addr32
[3], srcp
,
2057 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
2058 dest
->s6_addr32
[2], dest
->s6_addr32
[3], destp
,
2059 tw
->tw_substate
, 0, 0,
2060 3, jiffies_to_clock_t(ttd
), 0, 0, 0, 0,
2061 atomic_read(&tw
->tw_refcnt
), tw
);
2064 #ifdef CONFIG_PROC_FS
2065 static int tcp6_seq_show(struct seq_file
*seq
, void *v
)
2067 struct tcp_iter_state
*st
;
2069 if (v
== SEQ_START_TOKEN
) {
2074 "st tx_queue rx_queue tr tm->when retrnsmt"
2075 " uid timeout inode\n");
2080 switch (st
->state
) {
2081 case TCP_SEQ_STATE_LISTENING
:
2082 case TCP_SEQ_STATE_ESTABLISHED
:
2083 get_tcp6_sock(seq
, v
, st
->num
);
2085 case TCP_SEQ_STATE_OPENREQ
:
2086 get_openreq6(seq
, st
->syn_wait_sk
, v
, st
->num
, st
->uid
);
2088 case TCP_SEQ_STATE_TIME_WAIT
:
2089 get_timewait6_sock(seq
, v
, st
->num
);
2096 static struct file_operations tcp6_seq_fops
;
2097 static struct tcp_seq_afinfo tcp6_seq_afinfo
= {
2098 .owner
= THIS_MODULE
,
2101 .seq_show
= tcp6_seq_show
,
2102 .seq_fops
= &tcp6_seq_fops
,
2105 int __init
tcp6_proc_init(void)
2107 return tcp_proc_register(&tcp6_seq_afinfo
);
2110 void tcp6_proc_exit(void)
2112 tcp_proc_unregister(&tcp6_seq_afinfo
);
2116 struct proto tcpv6_prot
= {
2118 .owner
= THIS_MODULE
,
2120 .connect
= tcp_v6_connect
,
2121 .disconnect
= tcp_disconnect
,
2122 .accept
= inet_csk_accept
,
2124 .init
= tcp_v6_init_sock
,
2125 .destroy
= tcp_v6_destroy_sock
,
2126 .shutdown
= tcp_shutdown
,
2127 .setsockopt
= tcp_setsockopt
,
2128 .getsockopt
= tcp_getsockopt
,
2129 .sendmsg
= tcp_sendmsg
,
2130 .recvmsg
= tcp_recvmsg
,
2131 .backlog_rcv
= tcp_v6_do_rcv
,
2132 .hash
= tcp_v6_hash
,
2133 .unhash
= tcp_unhash
,
2134 .get_port
= tcp_v6_get_port
,
2135 .enter_memory_pressure
= tcp_enter_memory_pressure
,
2136 .sockets_allocated
= &tcp_sockets_allocated
,
2137 .memory_allocated
= &tcp_memory_allocated
,
2138 .memory_pressure
= &tcp_memory_pressure
,
2139 .orphan_count
= &tcp_orphan_count
,
2140 .sysctl_mem
= sysctl_tcp_mem
,
2141 .sysctl_wmem
= sysctl_tcp_wmem
,
2142 .sysctl_rmem
= sysctl_tcp_rmem
,
2143 .max_header
= MAX_TCP_HEADER
,
2144 .obj_size
= sizeof(struct tcp6_sock
),
2145 .twsk_prot
= &tcp6_timewait_sock_ops
,
2146 .rsk_prot
= &tcp6_request_sock_ops
,
2147 #ifdef CONFIG_COMPAT
2148 .compat_setsockopt
= compat_tcp_setsockopt
,
2149 .compat_getsockopt
= compat_tcp_getsockopt
,
2153 static struct inet6_protocol tcpv6_protocol
= {
2154 .handler
= tcp_v6_rcv
,
2155 .err_handler
= tcp_v6_err
,
2156 .gso_send_check
= tcp_v6_gso_send_check
,
2157 .gso_segment
= tcp_tso_segment
,
2158 .flags
= INET6_PROTO_NOPOLICY
|INET6_PROTO_FINAL
,
2161 static struct inet_protosw tcpv6_protosw
= {
2162 .type
= SOCK_STREAM
,
2163 .protocol
= IPPROTO_TCP
,
2164 .prot
= &tcpv6_prot
,
2165 .ops
= &inet6_stream_ops
,
2168 .flags
= INET_PROTOSW_PERMANENT
|
2172 void __init
tcpv6_init(void)
2174 /* register inet6 protocol */
2175 if (inet6_add_protocol(&tcpv6_protocol
, IPPROTO_TCP
) < 0)
2176 printk(KERN_ERR
"tcpv6_init: Could not register protocol\n");
2177 inet6_register_protosw(&tcpv6_protosw
);
2179 if (inet_csk_ctl_sock_create(&tcp6_socket
, PF_INET6
, SOCK_RAW
,
2181 panic("Failed to create the TCPv6 control socket.\n");