/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void	__tcp_v6_send_check(struct sk_buff *skb,
				    const struct in6_addr *saddr,
				    const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

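/* Descriptive note: cache the RX dst (and the routing tree serial number as
 * a validity cookie) on the socket so established-path packets can reuse it
 * instead of doing a full route lookup; the cookie is checked again in
 * tcp_v6_do_rcv() and tcp_v6_early_demux() below.
 */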
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct rt6_info *rt = (const struct rt6_info *)dst;

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	if (rt->rt6i_node)
		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
}

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(int len,
				   const struct in6_addr *saddr,
				   const struct in6_addr *daddr,
				   __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

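/* Descriptive note: active open (connect()). Resolves the route for the
 * flow, picks a source address, hands v4-mapped destinations off to
 * tcp_v4_connect(), chooses an initial sequence number and sends the SYN
 * via tcp_connect().
 */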
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if(ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if(addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

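/* Descriptive note: deferred PMTU handling, run once an ICMPV6_PKT_TOOBIG
 * notification has been recorded in tp->mtu_info (either directly from the
 * error handler or later from release_sock()).
 */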
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

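/* Descriptive note: ICMPv6 error handler for TCP. Looks up the socket the
 * error refers to and, depending on its state, adjusts the PMTU, follows a
 * redirect, drops a pending request_sock, or reports the error to the
 * application.
 */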
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, it SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


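/* Descriptive note: build and transmit a SYN+ACK for a pending connection
 * request; the route is looked up here when the caller has not already
 * provided one.
 */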
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi6 *fl6,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		fl6->daddr = treq->rmt_addr;
		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	struct flowi6 fl6;

	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

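/* Descriptive note: TCP-MD5 (RFC 2385) signature support. The block below
 * provides key lookup by peer address, key configuration via the TCP_MD5SIG
 * socket option, and hash computation / verification over the IPv6
 * pseudo-header and the TCP segment.
 */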
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

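/* Descriptive note: construct and send a stand-alone TCP segment with no
 * full socket behind it. Used for RSTs and for ACKs sent on behalf of
 * TIME_WAIT sockets and request socks, echoing the addresses of the packet
 * that triggered it.
 */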
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->daddr,
					   ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}


static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

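/* Descriptive note: handle an incoming SYN on a listening socket. Allocates
 * a request_sock, records the peer's addresses and options, chooses (or
 * cookie-encodes) the initial sequence number and replies with a SYN+ACK.
 */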
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	bool want_cookie = false;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	treq->rmt_addr = ipv6_hdr(skb)->saddr;
	treq->loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_release;

	if (tcp_v6_send_synack(sk, dst, &fl6, req,
			       (struct request_values *)&tmp_ext,
			       skb_get_queue_mapping(skb)) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}

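/* Descriptive note: complete the three-way handshake by creating the child
 * socket for an accepted connection, including the v4-mapped fall-back when
 * the SYN arrived over IPv4, and copying across IPv6 options and any MD5
 * key from the listener.
 */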
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt	  = NULL;
		newnp->mcast_oif  = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newnp->daddr = treq->rmt_addr;
	newnp->saddr = treq->loc_addr;
	newnp->rcv_saddr = treq->loc_addr;
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if(nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxtclass)
			np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

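/* Descriptive note: main IPv6 TCP receive entry point (registered with the
 * protocol handler). Validates the header and checksum, looks up the owning
 * socket, and either processes the segment directly, prequeues it, or pushes
 * it onto the socket backlog.
 */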
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

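/* Descriptive note: early demux. Before routing, try to match the packet to
 * an established socket so that the socket's cached RX dst can be attached
 * to the skb and reused, avoiding a separate route lookup.
 */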
1703static void tcp_v6_early_demux(struct sk_buff *skb)
1704{
1705 const struct ipv6hdr *hdr;
1706 const struct tcphdr *th;
1707 struct sock *sk;
1708
1709 if (skb->pkt_type != PACKET_HOST)
1710 return;
1711
1712 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1713 return;
1714
1715 hdr = ipv6_hdr(skb);
1716 th = tcp_hdr(skb);
1717
1718 if (th->doff < sizeof(struct tcphdr) / 4)
1719 return;
1720
1721 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1722 &hdr->saddr, th->source,
1723 &hdr->daddr, ntohs(th->dest),
1724 inet6_iif(skb));
1725 if (sk) {
1726 skb->sk = sk;
1727 skb->destructor = sock_edemux;
1728 if (sk->sk_state != TCP_TIME_WAIT) {
1729 struct dst_entry *dst = sk->sk_rx_dst;
1730 struct inet_sock *icsk = inet_sk(sk);
1731 if (dst)
5d299f3d 1732 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
c7109986 1733 if (dst &&
5d299f3d 1734 icsk->rx_dst_ifindex == skb->skb_iif)
c7109986
ED
1735 skb_dst_set_noref(skb, dst);
1736 }
1737 }
1738}
1739
ccb7c410
DM
1740static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1741 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1742 .twsk_unique = tcp_twsk_unique,
1743 .twsk_destructor= tcp_twsk_destructor,
ccb7c410
DM
1744};
1745
3b401a81 1746static const struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1747 .queue_xmit = inet6_csk_xmit,
1748 .send_check = tcp_v6_send_check,
1749 .rebuild_header = inet6_sk_rebuild_header,
5d299f3d 1750 .sk_rx_dst_set = inet6_sk_rx_dst_set,
543d9cfe
ACM
1751 .conn_request = tcp_v6_conn_request,
1752 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe 1753 .net_header_len = sizeof(struct ipv6hdr),
67469601 1754 .net_frag_header_len = sizeof(struct frag_hdr),
543d9cfe
ACM
1755 .setsockopt = ipv6_setsockopt,
1756 .getsockopt = ipv6_getsockopt,
1757 .addr2sockaddr = inet6_csk_addr2sockaddr,
1758 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1759 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1760#ifdef CONFIG_COMPAT
543d9cfe
ACM
1761 .compat_setsockopt = compat_ipv6_setsockopt,
1762 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1763#endif
1da177e4
LT
1764};
1765
cfb6eeb4 1766#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1767static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1768 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1769 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4 1770 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1771};
a928630a 1772#endif
cfb6eeb4 1773
1da177e4
LT
1774/*
1775 * TCP over IPv4 via INET6 API
1776 */
1777
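/* These ops are installed on an AF_INET6 socket once it is known to carry
 * IPv4 traffic (a v4-mapped peer): transmit, checksumming and header sizing
 * come from the IPv4 code, while sockaddr conversion and socket options
 * remain the IPv6 variants.
 */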
3b401a81 1778static const struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1779 .queue_xmit = ip_queue_xmit,
1780 .send_check = tcp_v4_send_check,
1781 .rebuild_header = inet_sk_rebuild_header,
63d02d15 1782 .sk_rx_dst_set = inet_sk_rx_dst_set,
543d9cfe
ACM
1783 .conn_request = tcp_v6_conn_request,
1784 .syn_recv_sock = tcp_v6_syn_recv_sock,
543d9cfe
ACM
1785 .net_header_len = sizeof(struct iphdr),
1786 .setsockopt = ipv6_setsockopt,
1787 .getsockopt = ipv6_getsockopt,
1788 .addr2sockaddr = inet6_csk_addr2sockaddr,
1789 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1790 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1791#ifdef CONFIG_COMPAT
543d9cfe
ACM
1792 .compat_setsockopt = compat_ipv6_setsockopt,
1793 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1794#endif
1da177e4
LT
1795};
1796
cfb6eeb4 1797#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1798static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1799 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1800 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4 1801 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1802};
a928630a 1803#endif
cfb6eeb4 1804
1da177e4
LT
1805/* NOTE: A lot of things set to zero explicitly by call to
1806 * sk_alloc() so need not be done here.
1807 */
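/* Beyond the common tcp_init_sock() setup, point the connection-level and
 * MD5 signature hooks at the IPv6-specific tables.
 */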
1808static int tcp_v6_init_sock(struct sock *sk)
1809{
6687e988 1810 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4 1811
900f65d3 1812 tcp_init_sock(sk);
1da177e4 1813
8292a17a 1814 icsk->icsk_af_ops = &ipv6_specific;
1da177e4 1815
cfb6eeb4 1816#ifdef CONFIG_TCP_MD5SIG
ac807fa8 1817 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
cfb6eeb4
YH
1818#endif
1819
1da177e4
LT
1820 return 0;
1821}
1822
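/* Teardown is the common IPv4 destroy plus release of IPv6-only state. */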
7d06b2e0 1823static void tcp_v6_destroy_sock(struct sock *sk)
1da177e4 1824{
1da177e4 1825 tcp_v4_destroy_sock(sk);
7d06b2e0 1826 inet6_destroy_sock(sk);
1da177e4
LT
1827}
1828
952a10be 1829#ifdef CONFIG_PROC_FS
1da177e4 1830/* Proc filesystem TCPv6 sock list dumping. */
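/* The helpers below emit one text line per SYN_RECV request, active socket
 * or TIME_WAIT entry. Addresses print as four 32-bit hex words and ports as
 * 4-digit hex; for example a listener on port 22 would show ":0016"
 * (illustrative value, not taken from a live dump).
 */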
1ab1457c 1831static void get_openreq6(struct seq_file *seq,
cf533ea5 1832 const struct sock *sk, struct request_sock *req, int i, int uid)
1da177e4 1833{
1da177e4 1834 int ttd = req->expires - jiffies;
b71d1d42
ED
1835 const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1836 const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1da177e4
LT
1837
1838 if (ttd < 0)
1839 ttd = 0;
1840
1da177e4
LT
1841 seq_printf(seq,
1842 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 1843 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1da177e4
LT
1844 i,
1845 src->s6_addr32[0], src->s6_addr32[1],
1846 src->s6_addr32[2], src->s6_addr32[3],
fd507037 1847 ntohs(inet_rsk(req)->loc_port),
1da177e4
LT
1848 dest->s6_addr32[0], dest->s6_addr32[1],
1849 dest->s6_addr32[2], dest->s6_addr32[3],
2e6599cb 1850 ntohs(inet_rsk(req)->rmt_port),
1da177e4
LT
1851 TCP_SYN_RECV,
1852 0,0, /* could print option size, but that is af dependent. */
1ab1457c
YH
1853 1, /* timers active (only the expire timer) */
1854 jiffies_to_clock_t(ttd),
1da177e4
LT
1855 req->retrans,
1856 uid,
1ab1457c 1857 0, /* non standard timer */
1da177e4
LT
1858 0, /* open_requests have no inode */
1859 0, req);
1860}
1861
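/* One line for an established or listening socket: send/receive queue
 * depths, which timer is pending and when it expires, and cwnd/ssthresh
 * (ssthresh prints as -1 while the connection is still in initial slow
 * start).
 */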
1862static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1863{
b71d1d42 1864 const struct in6_addr *dest, *src;
1da177e4
LT
1865 __u16 destp, srcp;
1866 int timer_active;
1867 unsigned long timer_expires;
cf533ea5
ED
1868 const struct inet_sock *inet = inet_sk(sp);
1869 const struct tcp_sock *tp = tcp_sk(sp);
463c84b9 1870 const struct inet_connection_sock *icsk = inet_csk(sp);
cf533ea5 1871 const struct ipv6_pinfo *np = inet6_sk(sp);
1da177e4
LT
1872
1873 dest = &np->daddr;
1874 src = &np->rcv_saddr;
c720c7e8
ED
1875 destp = ntohs(inet->inet_dport);
1876 srcp = ntohs(inet->inet_sport);
463c84b9
ACM
1877
1878 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 1879 timer_active = 1;
463c84b9
ACM
1880 timer_expires = icsk->icsk_timeout;
1881 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 1882 timer_active = 4;
463c84b9 1883 timer_expires = icsk->icsk_timeout;
1da177e4
LT
1884 } else if (timer_pending(&sp->sk_timer)) {
1885 timer_active = 2;
1886 timer_expires = sp->sk_timer.expires;
1887 } else {
1888 timer_active = 0;
1889 timer_expires = jiffies;
1890 }
1891
1892 seq_printf(seq,
1893 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 1894 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
1da177e4
LT
1895 i,
1896 src->s6_addr32[0], src->s6_addr32[1],
1897 src->s6_addr32[2], src->s6_addr32[3], srcp,
1898 dest->s6_addr32[0], dest->s6_addr32[1],
1899 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1ab1457c 1900 sp->sk_state,
47da8ee6
SS
1901 tp->write_seq-tp->snd_una,
1902 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1da177e4
LT
1903 timer_active,
1904 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 1905 icsk->icsk_retransmits,
1da177e4 1906 sock_i_uid(sp),
6687e988 1907 icsk->icsk_probes_out,
1da177e4
LT
1908 sock_i_ino(sp),
1909 atomic_read(&sp->sk_refcnt), sp,
7be87351
SH
1910 jiffies_to_clock_t(icsk->icsk_rto),
1911 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 1912 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
0b6a05c1
IJ
1913 tp->snd_cwnd,
1914 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1da177e4
LT
1915 );
1916}
1917
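/* One line for a TIME_WAIT entry: only the substate and the remaining
 * timewait timeout carry real information; the other columns are printed
 * as zeros for layout compatibility with the other socket types.
 */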
1ab1457c 1918static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 1919 struct inet_timewait_sock *tw, int i)
1da177e4 1920{
b71d1d42 1921 const struct in6_addr *dest, *src;
1da177e4 1922 __u16 destp, srcp;
cf533ea5 1923 const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1da177e4
LT
1924 int ttd = tw->tw_ttd - jiffies;
1925
1926 if (ttd < 0)
1927 ttd = 0;
1928
0fa1a53e
ACM
1929 dest = &tw6->tw_v6_daddr;
1930 src = &tw6->tw_v6_rcv_saddr;
1da177e4
LT
1931 destp = ntohs(tw->tw_dport);
1932 srcp = ntohs(tw->tw_sport);
1933
1934 seq_printf(seq,
1935 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 1936 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1da177e4
LT
1937 i,
1938 src->s6_addr32[0], src->s6_addr32[1],
1939 src->s6_addr32[2], src->s6_addr32[3], srcp,
1940 dest->s6_addr32[0], dest->s6_addr32[1],
1941 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1942 tw->tw_substate, 0, 0,
1943 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1944 atomic_read(&tw->tw_refcnt), tw);
1945}
1946
1da177e4
LT
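/* seq_file show callback: print the header row for the start token, then
 * dispatch on the iterator state to the matching formatter above.
 */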
1947static int tcp6_seq_show(struct seq_file *seq, void *v)
1948{
1949 struct tcp_iter_state *st;
1950
1951 if (v == SEQ_START_TOKEN) {
1952 seq_puts(seq,
1953 " sl "
1954 "local_address "
1955 "remote_address "
1956 "st tx_queue rx_queue tr tm->when retrnsmt"
1957 " uid timeout inode\n");
1958 goto out;
1959 }
1960 st = seq->private;
1961
1962 switch (st->state) {
1963 case TCP_SEQ_STATE_LISTENING:
1964 case TCP_SEQ_STATE_ESTABLISHED:
1965 get_tcp6_sock(seq, v, st->num);
1966 break;
1967 case TCP_SEQ_STATE_OPENREQ:
1968 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1969 break;
1970 case TCP_SEQ_STATE_TIME_WAIT:
1971 get_timewait6_sock(seq, v, st->num);
1972 break;
1973 }
 1974 out:
1975 return 0;
1976}
1977
73cb88ec
AV
1978static const struct file_operations tcp6_afinfo_seq_fops = {
1979 .owner = THIS_MODULE,
1980 .open = tcp_seq_open,
1981 .read = seq_read,
1982 .llseek = seq_lseek,
1983 .release = seq_release_net
1984};
1985
1da177e4 1986static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1da177e4
LT
1987 .name = "tcp6",
1988 .family = AF_INET6,
73cb88ec 1989 .seq_fops = &tcp6_afinfo_seq_fops,
9427c4b3
DL
1990 .seq_ops = {
1991 .show = tcp6_seq_show,
1992 },
1da177e4
LT
1993};
1994
2c8c1e72 1995int __net_init tcp6_proc_init(struct net *net)
1da177e4 1996{
6f8b13bc 1997 return tcp_proc_register(net, &tcp6_seq_afinfo);
1da177e4
LT
1998}
1999
6f8b13bc 2000void tcp6_proc_exit(struct net *net)
1da177e4 2001{
6f8b13bc 2002 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1da177e4
LT
2003}
2004#endif
2005
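/* struct proto for AF_INET6/SOCK_STREAM sockets: most hooks are the generic
 * TCP ones; only connect, socket init/destroy, hashing, MTU reduction and
 * the backlog receive path are IPv6-specific.
 */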
2006struct proto tcpv6_prot = {
2007 .name = "TCPv6",
2008 .owner = THIS_MODULE,
2009 .close = tcp_close,
2010 .connect = tcp_v6_connect,
2011 .disconnect = tcp_disconnect,
463c84b9 2012 .accept = inet_csk_accept,
1da177e4
LT
2013 .ioctl = tcp_ioctl,
2014 .init = tcp_v6_init_sock,
2015 .destroy = tcp_v6_destroy_sock,
2016 .shutdown = tcp_shutdown,
2017 .setsockopt = tcp_setsockopt,
2018 .getsockopt = tcp_getsockopt,
1da177e4 2019 .recvmsg = tcp_recvmsg,
7ba42910
CG
2020 .sendmsg = tcp_sendmsg,
2021 .sendpage = tcp_sendpage,
1da177e4 2022 .backlog_rcv = tcp_v6_do_rcv,
46d3ceab 2023 .release_cb = tcp_release_cb,
563d34d0 2024 .mtu_reduced = tcp_v6_mtu_reduced,
1da177e4 2025 .hash = tcp_v6_hash,
ab1e0a13
ACM
2026 .unhash = inet_unhash,
2027 .get_port = inet_csk_get_port,
1da177e4
LT
2028 .enter_memory_pressure = tcp_enter_memory_pressure,
2029 .sockets_allocated = &tcp_sockets_allocated,
2030 .memory_allocated = &tcp_memory_allocated,
2031 .memory_pressure = &tcp_memory_pressure,
0a5578cf 2032 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2033 .sysctl_wmem = sysctl_tcp_wmem,
2034 .sysctl_rmem = sysctl_tcp_rmem,
2035 .max_header = MAX_TCP_HEADER,
2036 .obj_size = sizeof(struct tcp6_sock),
3ab5aee7 2037 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2038 .twsk_prot = &tcp6_timewait_sock_ops,
60236fdd 2039 .rsk_prot = &tcp6_request_sock_ops,
39d8cda7 2040 .h.hashinfo = &tcp_hashinfo,
7ba42910 2041 .no_autobind = true,
543d9cfe
ACM
2042#ifdef CONFIG_COMPAT
2043 .compat_setsockopt = compat_tcp_setsockopt,
2044 .compat_getsockopt = compat_tcp_getsockopt,
2045#endif
c255a458 2046#ifdef CONFIG_MEMCG_KMEM
d1a4c0b3
GC
2047 .proto_cgroup = tcp_proto_cgroup,
2048#endif
1da177e4
LT
2049};
2050
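/* Layer-3 protocol entry registered for IPPROTO_TCP: receive and error
 * handlers, the early-demux hook and the GSO/GRO offload callbacks.
 */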
41135cc8 2051static const struct inet6_protocol tcpv6_protocol = {
c7109986 2052 .early_demux = tcp_v6_early_demux,
1da177e4
LT
2053 .handler = tcp_v6_rcv,
2054 .err_handler = tcp_v6_err,
a430a43d 2055 .gso_send_check = tcp_v6_gso_send_check,
adcfc7d0 2056 .gso_segment = tcp_tso_segment,
684f2176
HX
2057 .gro_receive = tcp6_gro_receive,
2058 .gro_complete = tcp6_gro_complete,
1da177e4
LT
2059 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2060};
2061
1da177e4
LT
2062static struct inet_protosw tcpv6_protosw = {
2063 .type = SOCK_STREAM,
2064 .protocol = IPPROTO_TCP,
2065 .prot = &tcpv6_prot,
2066 .ops = &inet6_stream_ops,
1da177e4 2067 .no_check = 0,
d83d8461
ACM
2068 .flags = INET_PROTOSW_PERMANENT |
2069 INET_PROTOSW_ICSK,
1da177e4
LT
2070};
2071
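/* Per-namespace setup: create the control socket used elsewhere in this
 * file to transmit resets and ACKs on behalf of segments that do not belong
 * to a local socket, and purge IPv6 timewait entries when a batch of
 * namespaces is torn down.
 */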
2c8c1e72 2072static int __net_init tcpv6_net_init(struct net *net)
93ec926b 2073{
5677242f
DL
2074 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2075 SOCK_RAW, IPPROTO_TCP, net);
93ec926b
DL
2076}
2077
2c8c1e72 2078static void __net_exit tcpv6_net_exit(struct net *net)
93ec926b 2079{
5677242f 2080 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
b099ce26
EB
2081}
2082
2c8c1e72 2083static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
b099ce26
EB
2084{
2085 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
93ec926b
DL
2086}
2087
2088static struct pernet_operations tcpv6_net_ops = {
b099ce26
EB
2089 .init = tcpv6_net_init,
2090 .exit = tcpv6_net_exit,
2091 .exit_batch = tcpv6_net_exit_batch,
93ec926b
DL
2092};
2093
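/* Module init: register the protocol handler, then the protosw, then the
 * pernet operations; on failure, unwind only what has already been
 * registered, in reverse order.
 */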
7f4e4868 2094int __init tcpv6_init(void)
1da177e4 2095{
7f4e4868
DL
2096 int ret;
2097
2098 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2099 if (ret)
2100 goto out;
2101
1da177e4 2102 /* register inet6 protocol */
7f4e4868
DL
2103 ret = inet6_register_protosw(&tcpv6_protosw);
2104 if (ret)
2105 goto out_tcpv6_protocol;
2106
93ec926b 2107 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2108 if (ret)
2109 goto out_tcpv6_protosw;
 2110 out:
2111 return ret;
ae0f7d5f 2112
7f4e4868
DL
2113out_tcpv6_protocol:
2114 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2115out_tcpv6_protosw:
2116 inet6_unregister_protosw(&tcpv6_protosw);
2117 goto out;
2118}
2119
09f7709f 2120void tcpv6_exit(void)
7f4e4868 2121{
93ec926b 2122 unregister_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2123 inet6_unregister_protosw(&tcpv6_protosw);
2124 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1da177e4 2125}