net: remove ipv6_addr_copy()
net/ipv6/tcp_ipv6.c
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr,
				const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

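/* Hash the socket into the TCP tables: v4-mapped sockets are handed to the
 * IPv4 hash routine, native IPv6 sockets go through __inet6_hash() with
 * bottom halves disabled.
 */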
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

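/* TCP checksum over the IPv6 pseudo-header (saddr, daddr, length, protocol)
 * plus the caller-supplied partial checksum of the TCP segment itself.
 */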
static __inline__ __sum16 tcp_v6_check(int len,
				       const struct in6_addr *saddr,
				       const struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

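/* Pick the initial sequence number for an outgoing connection from the
 * {saddr, daddr, sport, dport} tuple, see secure_tcpv6_sequence_number().
 */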
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

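/* Active open: resolve the destination (falling back to tcp_v4_connect() for
 * v4-mapped addresses), pick a source address and route, bind a local port
 * via inet6_hash_connect() and send the SYN with tcp_connect().
 */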
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table, when entering state
		 * TIME-WAIT, and initialize rx_opt.ts_recent from it
		 * when trying a new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

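/* ICMPv6 error handler: look up the socket the quoted TCP header belongs to
 * and react to the error, e.g. adjust the path MTU on ICMPV6_PKT_TOOBIG or
 * report the converted errno to the user.
 */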
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			fl6.daddr = np->daddr;
			fl6.saddr = np->saddr;
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


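/* Build and transmit a SYN|ACK for a pending connection request, routing it
 * with the request's addresses and the listener's IPv6 options.
 */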
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = treq->rmt_addr;
	fl6.saddr = treq->loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		fl6.daddr = treq->rmt_addr;
		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

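/* TCP MD5 signature (RFC 2385) support: per-socket lookup, add, delete and
 * hashing helpers for IPv6 peers, mirroring the IPv4 code in tcp_ipv4.c.
 */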
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tp->md5sig_info->entries6 == 0 &&
		    tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof(tp->md5sig_info->keys6[0]) *
					(tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				kfree(newkey);
				if (tp->md5sig_info->entries6 == 0)
					tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof(tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr = *peer;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
				tcp_free_md5sig_pool();
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof(tp->md5sig_info->keys6[0]));
			}
			return 0;
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

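/* Verify the TCP MD5 option of an incoming segment against the key configured
 * for its source address; returns 1 (drop) on any mismatch.
 */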
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

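/* Fill in the TCP checksum field, either as a partial checksum to be finished
 * by the hardware or as a complete software checksum.
 */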
static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

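/* GRO hooks: validate the checksum before handing segments to the generic
 * TCP GRO engine, and restore the pseudo-header checksum on completion.
 */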
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

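/* Build and send a bare ACK or RST segment (no socket attached) in reply to
 * the given skb, swapping its addresses and ports.
 */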
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}


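/* For a segment arriving on a listening socket, find the matching request
 * sock or an already established child; may fall back to SYN cookies.
 */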
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
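/* Passive open: allocate a request sock for an incoming SYN, parse its
 * options, choose an ISN (or a SYN cookie under synflood) and answer with
 * tcp_v6_send_synack().
 */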
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	int want_cookie = 0;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = 0;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	treq->rmt_addr = ipv6_hdr(skb)->saddr;
	treq->loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (!isn) {
		struct inet_peer *peer = NULL;

		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}

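/* Create the child socket once the 3-way handshake completes, including the
 * v6-mapped case where the child is really an IPv4 socket.
 */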
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newnp->daddr = treq->rmt_addr;
	newnp->saddr = treq->loc_addr;
	newnp->rcv_saddr = treq->loc_addr;
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

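/* Validate or set up the receive checksum; short segments (<= 76 bytes) are
 * checksummed immediately in software.
 */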
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

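/* Main IPv6 TCP receive routine: sanity-check the header, look up the owning
 * socket and either process the segment directly, prequeue it, or push it
 * onto the socket backlog.
 */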
e5bbef20 1690static int tcp_v6_rcv(struct sk_buff *skb)
1da177e4 1691{
cf533ea5 1692 const struct tcphdr *th;
b71d1d42 1693 const struct ipv6hdr *hdr;
1da177e4
LT
1694 struct sock *sk;
1695 int ret;
a86b1e30 1696 struct net *net = dev_net(skb->dev);
1da177e4
LT
1697
1698 if (skb->pkt_type != PACKET_HOST)
1699 goto discard_it;
1700
1701 /*
1702 * Count it even if it's bad.
1703 */
63231bdd 1704 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1da177e4
LT
1705
1706 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1707 goto discard_it;
1708
aa8223c7 1709 th = tcp_hdr(skb);
1da177e4
LT
1710
1711 if (th->doff < sizeof(struct tcphdr)/4)
1712 goto bad_packet;
1713 if (!pskb_may_pull(skb, th->doff*4))
1714 goto discard_it;
1715
60476372 1716 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1da177e4
LT
1717 goto bad_packet;
1718
aa8223c7 1719 th = tcp_hdr(skb);
e802af9c 1720 hdr = ipv6_hdr(skb);
1da177e4
LT
1721 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1722 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1723 skb->len - th->doff*4);
1724 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1725 TCP_SKB_CB(skb)->when = 0;
b82d1bb4 1726 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1da177e4
LT
1727 TCP_SKB_CB(skb)->sacked = 0;
1728
9a1f27c4 1729 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1da177e4
LT
1730 if (!sk)
1731 goto no_tcp_socket;
1732
1733process:
1734 if (sk->sk_state == TCP_TIME_WAIT)
1735 goto do_time_wait;
1736
e802af9c
SH
1737 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1738 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1739 goto discard_and_relse;
1740 }
1741
1da177e4
LT
1742 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1743 goto discard_and_relse;
1744
fda9ef5d 1745 if (sk_filter(sk, skb))
1da177e4
LT
1746 goto discard_and_relse;
1747
1748 skb->dev = NULL;
1749
293b9c42 1750 bh_lock_sock_nested(sk);
1da177e4
LT
1751 ret = 0;
1752 if (!sock_owned_by_user(sk)) {
1a2449a8 1753#ifdef CONFIG_NET_DMA
1ab1457c 1754 struct tcp_sock *tp = tcp_sk(sk);
b4caea8a 1755 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
f67b4599 1756 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1ab1457c
YH
1757 if (tp->ucopy.dma_chan)
1758 ret = tcp_v6_do_rcv(sk, skb);
1759 else
1a2449a8
CL
1760#endif
1761 {
1762 if (!tcp_prequeue(sk, skb))
1763 ret = tcp_v6_do_rcv(sk, skb);
1764 }
6cce09f8 1765 } else if (unlikely(sk_add_backlog(sk, skb))) {
6b03a53a 1766 bh_unlock_sock(sk);
6cce09f8 1767 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
6b03a53a
ZY
1768 goto discard_and_relse;
1769 }
1da177e4
LT
1770 bh_unlock_sock(sk);
1771
1772 sock_put(sk);
1773 return ret ? -1 : 0;
1774
1775no_tcp_socket:
1776 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1777 goto discard_it;
1778
1779 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1780bad_packet:
63231bdd 1781 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1da177e4 1782 } else {
cfb6eeb4 1783 tcp_v6_send_reset(NULL, skb);
1da177e4
LT
1784 }
1785
1786discard_it:
1787
1788 /*
1789 * Discard frame
1790 */
1791
1792 kfree_skb(skb);
1793 return 0;
1794
1795discard_and_relse:
1796 sock_put(sk);
1797 goto discard_it;
1798
1799do_time_wait:
1800 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
9469c7b4 1801 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1802 goto discard_it;
1803 }
1804
1805 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
63231bdd 1806 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
9469c7b4 1807 inet_twsk_put(inet_twsk(sk));
1da177e4
LT
1808 goto discard_it;
1809 }
1810
9469c7b4 1811 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1da177e4
LT
1812 case TCP_TW_SYN:
1813 {
1814 struct sock *sk2;
1815
c346dca1 1816 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
0660e03f 1817 &ipv6_hdr(skb)->daddr,
505cbfc5 1818 ntohs(th->dest), inet6_iif(skb));
1da177e4 1819 if (sk2 != NULL) {
295ff7ed
ACM
1820 struct inet_timewait_sock *tw = inet_twsk(sk);
1821 inet_twsk_deschedule(tw, &tcp_death_row);
1822 inet_twsk_put(tw);
1da177e4
LT
1823 sk = sk2;
1824 goto process;
1825 }
1826 /* Fall through to ACK */
1827 }
1828 case TCP_TW_ACK:
1829 tcp_v6_timewait_ack(sk, skb);
1830 break;
1831 case TCP_TW_RST:
1832 goto no_tcp_socket;
1833 case TCP_TW_SUCCESS:;
1834 }
1835 goto discard_it;
1836}
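/*
 * A small sketch restating the sequence-space arithmetic tcp_v6_rcv()
 * uses when it fills in TCP_SKB_CB(): the payload length is skb->len
 * minus the header length (th->doff * 4), and SYN and FIN each consume
 * one additional sequence number.  The helper name is hypothetical and
 * exists only to make the formula explicit.
 */
static inline u32 tcp_segment_end_seq(u32 seq, unsigned int syn,
				      unsigned int fin, unsigned int payload)
{
	/* A bare SYN ends at seq + 1; a 100-byte segment carrying FIN
	 * ends at seq + 101.
	 */
	return seq + syn + fin + payload;
}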
1837
ccb7c410
DM
1838static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1839{
db3949c4
DM
1840 struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1841 struct ipv6_pinfo *np = inet6_sk(sk);
1842 struct inet_peer *peer;
1843
1844 if (!rt ||
1845 !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1846 peer = inet_getpeer_v6(&np->daddr, 1);
1847 *release_it = true;
1848 } else {
1849 if (!rt->rt6i_peer)
1850 rt6_bind_peer(rt, 1);
1851 peer = rt->rt6i_peer;
457de438 1852 *release_it = false;
db3949c4
DM
1853 }
1854
1855 return peer;
ccb7c410
DM
1856}
1857
1858static void *tcp_v6_tw_get_peer(struct sock *sk)
1da177e4 1859{
cf533ea5
ED
1860 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1861 const struct inet_timewait_sock *tw = inet_twsk(sk);
ccb7c410
DM
1862
1863 if (tw->tw_family == AF_INET)
1864 return tcp_v4_tw_get_peer(sk);
1865
db3949c4 1866 return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1da177e4
LT
1867}
1868
ccb7c410
DM
1869static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1870 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1871 .twsk_unique = tcp_twsk_unique,
1872 .twsk_destructor= tcp_twsk_destructor,
1873 .twsk_getpeer = tcp_v6_tw_get_peer,
1874};
1875
3b401a81 1876static const struct inet_connection_sock_af_ops ipv6_specific = {
543d9cfe
ACM
1877 .queue_xmit = inet6_csk_xmit,
1878 .send_check = tcp_v6_send_check,
1879 .rebuild_header = inet6_sk_rebuild_header,
1880 .conn_request = tcp_v6_conn_request,
1881 .syn_recv_sock = tcp_v6_syn_recv_sock,
3f419d2d 1882 .get_peer = tcp_v6_get_peer,
543d9cfe
ACM
1883 .net_header_len = sizeof(struct ipv6hdr),
1884 .setsockopt = ipv6_setsockopt,
1885 .getsockopt = ipv6_getsockopt,
1886 .addr2sockaddr = inet6_csk_addr2sockaddr,
1887 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1888 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1889#ifdef CONFIG_COMPAT
543d9cfe
ACM
1890 .compat_setsockopt = compat_ipv6_setsockopt,
1891 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1892#endif
1da177e4
LT
1893};
1894
cfb6eeb4 1895#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1896static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
cfb6eeb4 1897 .md5_lookup = tcp_v6_md5_lookup,
49a72dfb 1898 .calc_md5_hash = tcp_v6_md5_hash_skb,
cfb6eeb4
YH
1899 .md5_add = tcp_v6_md5_add_func,
1900 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1901};
a928630a 1902#endif
cfb6eeb4 1903
1da177e4
LT
1904/*
1905 * TCP over IPv4 via INET6 API
1906 */
1907
3b401a81 1908static const struct inet_connection_sock_af_ops ipv6_mapped = {
543d9cfe
ACM
1909 .queue_xmit = ip_queue_xmit,
1910 .send_check = tcp_v4_send_check,
1911 .rebuild_header = inet_sk_rebuild_header,
1912 .conn_request = tcp_v6_conn_request,
1913 .syn_recv_sock = tcp_v6_syn_recv_sock,
3f419d2d 1914 .get_peer = tcp_v4_get_peer,
543d9cfe
ACM
1915 .net_header_len = sizeof(struct iphdr),
1916 .setsockopt = ipv6_setsockopt,
1917 .getsockopt = ipv6_getsockopt,
1918 .addr2sockaddr = inet6_csk_addr2sockaddr,
1919 .sockaddr_len = sizeof(struct sockaddr_in6),
ab1e0a13 1920 .bind_conflict = inet6_csk_bind_conflict,
3fdadf7d 1921#ifdef CONFIG_COMPAT
543d9cfe
ACM
1922 .compat_setsockopt = compat_ipv6_setsockopt,
1923 .compat_getsockopt = compat_ipv6_getsockopt,
3fdadf7d 1924#endif
1da177e4
LT
1925};
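/*
 * A userspace sketch of when the ipv6_mapped ops above come into play:
 * an AF_INET6 socket with IPV6_V6ONLY cleared accepts IPv4 clients,
 * which show up with v4-mapped ::ffff:a.b.c.d peer addresses while the
 * kernel transparently switches the connection over to the IPv4 code
 * paths.  The helper name and port number are illustrative assumptions
 * and error handling is omitted.
 */
#include <netinet/in.h>
#include <sys/socket.h>
#include <string.h>

static int make_dual_stack_listener(void)
{
	int off = 0;
	struct sockaddr_in6 a;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	/* IPV6_V6ONLY = 0 lets IPv4 clients connect via mapped addresses. */
	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off));

	memset(&a, 0, sizeof(a));
	a.sin6_family = AF_INET6;
	a.sin6_addr = in6addr_any;
	a.sin6_port = htons(8080);	/* arbitrary example port */
	bind(fd, (struct sockaddr *)&a, sizeof(a));
	listen(fd, 16);
	return fd;
}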
1926
cfb6eeb4 1927#ifdef CONFIG_TCP_MD5SIG
b2e4b3de 1928static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
cfb6eeb4 1929 .md5_lookup = tcp_v4_md5_lookup,
49a72dfb 1930 .calc_md5_hash = tcp_v4_md5_hash_skb,
cfb6eeb4
YH
1931 .md5_add = tcp_v6_md5_add_func,
1932 .md5_parse = tcp_v6_parse_md5_keys,
cfb6eeb4 1933};
a928630a 1934#endif
cfb6eeb4 1935
1da177e4
LT
1936/* NOTE: Many fields are already zeroed by the call to sk_alloc(),
 1937 * so they need not be initialized again here.
 1938 */
1939static int tcp_v6_init_sock(struct sock *sk)
1940{
6687e988 1941 struct inet_connection_sock *icsk = inet_csk(sk);
1da177e4
LT
1942 struct tcp_sock *tp = tcp_sk(sk);
1943
1944 skb_queue_head_init(&tp->out_of_order_queue);
1945 tcp_init_xmit_timers(sk);
1946 tcp_prequeue_init(tp);
1947
6687e988 1948 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1da177e4
LT
1949 tp->mdev = TCP_TIMEOUT_INIT;
1950
1951 /* So many TCP implementations out there (incorrectly) count the
1952 * initial SYN frame in their delayed-ACK and congestion control
1953 * algorithms that we must have the following bandaid to talk
1954 * efficiently to them. -DaveM
1955 */
1956 tp->snd_cwnd = 2;
1957
1958 /* See draft-stevens-tcpca-spec-01 for discussion of the
1959 * initialization of these values.
1960 */
0b6a05c1 1961 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1da177e4 1962 tp->snd_cwnd_clamp = ~0;
bee7ca9e 1963 tp->mss_cache = TCP_MSS_DEFAULT;
1da177e4
LT
1964
1965 tp->reordering = sysctl_tcp_reordering;
1966
1967 sk->sk_state = TCP_CLOSE;
1968
8292a17a 1969 icsk->icsk_af_ops = &ipv6_specific;
6687e988 1970 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
d83d8461 1971 icsk->icsk_sync_mss = tcp_sync_mss;
1da177e4
LT
1972 sk->sk_write_space = sk_stream_write_space;
1973 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1974
cfb6eeb4
YH
1975#ifdef CONFIG_TCP_MD5SIG
1976 tp->af_specific = &tcp_sock_ipv6_specific;
1977#endif
1978
435cf559
WAS
1979 /* TCP Cookie Transactions */
1980 if (sysctl_tcp_cookie_size > 0) {
1981 /* Default, cookies without s_data_payload. */
1982 tp->cookie_values =
1983 kzalloc(sizeof(*tp->cookie_values),
1984 sk->sk_allocation);
1985 if (tp->cookie_values != NULL)
1986 kref_init(&tp->cookie_values->kref);
1987 }
1988 /* Presumed zeroed, in order of appearance:
1989 * cookie_in_always, cookie_out_never,
1990 * s_data_constant, s_data_in, s_data_out
1991 */
1da177e4
LT
1992 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1993 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1994
eb4dea58 1995 local_bh_disable();
1748376b 1996 percpu_counter_inc(&tcp_sockets_allocated);
eb4dea58 1997 local_bh_enable();
1da177e4
LT
1998
1999 return 0;
2000}
2001
7d06b2e0 2002static void tcp_v6_destroy_sock(struct sock *sk)
1da177e4 2003{
cfb6eeb4
YH
2004#ifdef CONFIG_TCP_MD5SIG
2005 /* Clean up the MD5 key list */
2006 if (tcp_sk(sk)->md5sig_info)
2007 tcp_v6_clear_md5_list(sk);
2008#endif
1da177e4 2009 tcp_v4_destroy_sock(sk);
7d06b2e0 2010 inet6_destroy_sock(sk);
1da177e4
LT
2011}
2012
952a10be 2013#ifdef CONFIG_PROC_FS
1da177e4 2014/* Proc filesystem TCPv6 sock list dumping. */
1ab1457c 2015static void get_openreq6(struct seq_file *seq,
cf533ea5 2016 const struct sock *sk, struct request_sock *req, int i, int uid)
1da177e4 2017{
1da177e4 2018 int ttd = req->expires - jiffies;
b71d1d42
ED
2019 const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
2020 const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1da177e4
LT
2021
2022 if (ttd < 0)
2023 ttd = 0;
2024
1da177e4
LT
2025 seq_printf(seq,
2026 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 2027 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1da177e4
LT
2028 i,
2029 src->s6_addr32[0], src->s6_addr32[1],
2030 src->s6_addr32[2], src->s6_addr32[3],
fd507037 2031 ntohs(inet_rsk(req)->loc_port),
1da177e4
LT
2032 dest->s6_addr32[0], dest->s6_addr32[1],
2033 dest->s6_addr32[2], dest->s6_addr32[3],
2e6599cb 2034 ntohs(inet_rsk(req)->rmt_port),
1da177e4
LT
2035 TCP_SYN_RECV,
2036 0,0, /* could print option size, but that is af dependent. */
1ab1457c
YH
2037 1, /* timers active (only the expire timer) */
2038 jiffies_to_clock_t(ttd),
1da177e4
LT
2039 req->retrans,
2040 uid,
1ab1457c 2041 0, /* non standard timer */
1da177e4
LT
2042 0, /* open_requests have no inode */
2043 0, req);
2044}
2045
2046static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2047{
b71d1d42 2048 const struct in6_addr *dest, *src;
1da177e4
LT
2049 __u16 destp, srcp;
2050 int timer_active;
2051 unsigned long timer_expires;
cf533ea5
ED
2052 const struct inet_sock *inet = inet_sk(sp);
2053 const struct tcp_sock *tp = tcp_sk(sp);
463c84b9 2054 const struct inet_connection_sock *icsk = inet_csk(sp);
cf533ea5 2055 const struct ipv6_pinfo *np = inet6_sk(sp);
1da177e4
LT
2056
2057 dest = &np->daddr;
2058 src = &np->rcv_saddr;
c720c7e8
ED
2059 destp = ntohs(inet->inet_dport);
2060 srcp = ntohs(inet->inet_sport);
463c84b9
ACM
2061
2062 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1da177e4 2063 timer_active = 1;
463c84b9
ACM
2064 timer_expires = icsk->icsk_timeout;
2065 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1da177e4 2066 timer_active = 4;
463c84b9 2067 timer_expires = icsk->icsk_timeout;
1da177e4
LT
2068 } else if (timer_pending(&sp->sk_timer)) {
2069 timer_active = 2;
2070 timer_expires = sp->sk_timer.expires;
2071 } else {
2072 timer_active = 0;
2073 timer_expires = jiffies;
2074 }
2075
2076 seq_printf(seq,
2077 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 2078 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
1da177e4
LT
2079 i,
2080 src->s6_addr32[0], src->s6_addr32[1],
2081 src->s6_addr32[2], src->s6_addr32[3], srcp,
2082 dest->s6_addr32[0], dest->s6_addr32[1],
2083 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1ab1457c 2084 sp->sk_state,
47da8ee6
SS
2085 tp->write_seq-tp->snd_una,
2086 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1da177e4
LT
2087 timer_active,
2088 jiffies_to_clock_t(timer_expires - jiffies),
463c84b9 2089 icsk->icsk_retransmits,
1da177e4 2090 sock_i_uid(sp),
6687e988 2091 icsk->icsk_probes_out,
1da177e4
LT
2092 sock_i_ino(sp),
2093 atomic_read(&sp->sk_refcnt), sp,
7be87351
SH
2094 jiffies_to_clock_t(icsk->icsk_rto),
2095 jiffies_to_clock_t(icsk->icsk_ack.ato),
463c84b9 2096 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
0b6a05c1
IJ
2097 tp->snd_cwnd,
2098 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1da177e4
LT
2099 );
2100}
2101
1ab1457c 2102static void get_timewait6_sock(struct seq_file *seq,
8feaf0c0 2103 struct inet_timewait_sock *tw, int i)
1da177e4 2104{
b71d1d42 2105 const struct in6_addr *dest, *src;
1da177e4 2106 __u16 destp, srcp;
cf533ea5 2107 const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1da177e4
LT
2108 int ttd = tw->tw_ttd - jiffies;
2109
2110 if (ttd < 0)
2111 ttd = 0;
2112
0fa1a53e
ACM
2113 dest = &tw6->tw_v6_daddr;
2114 src = &tw6->tw_v6_rcv_saddr;
1da177e4
LT
2115 destp = ntohs(tw->tw_dport);
2116 srcp = ntohs(tw->tw_sport);
2117
2118 seq_printf(seq,
2119 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
71338aa7 2120 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1da177e4
LT
2121 i,
2122 src->s6_addr32[0], src->s6_addr32[1],
2123 src->s6_addr32[2], src->s6_addr32[3], srcp,
2124 dest->s6_addr32[0], dest->s6_addr32[1],
2125 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2126 tw->tw_substate, 0, 0,
2127 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2128 atomic_read(&tw->tw_refcnt), tw);
2129}
2130
1da177e4
LT
2131static int tcp6_seq_show(struct seq_file *seq, void *v)
2132{
2133 struct tcp_iter_state *st;
2134
2135 if (v == SEQ_START_TOKEN) {
2136 seq_puts(seq,
2137 " sl "
2138 "local_address "
2139 "remote_address "
2140 "st tx_queue rx_queue tr tm->when retrnsmt"
2141 " uid timeout inode\n");
2142 goto out;
2143 }
2144 st = seq->private;
2145
2146 switch (st->state) {
2147 case TCP_SEQ_STATE_LISTENING:
2148 case TCP_SEQ_STATE_ESTABLISHED:
2149 get_tcp6_sock(seq, v, st->num);
2150 break;
2151 case TCP_SEQ_STATE_OPENREQ:
2152 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2153 break;
2154 case TCP_SEQ_STATE_TIME_WAIT:
2155 get_timewait6_sock(seq, v, st->num);
2156 break;
2157 }
2158out:
2159 return 0;
2160}
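/*
 * A sketch of how the format emitted by get_tcp6_sock() above can be
 * read back from /proc/net/tcp6.  Each address is printed as four
 * %08X words taken directly from s6_addr32[], so the text reflects the
 * kernel's native word order (on little-endian hardware ::1 appears as
 * "...01000000"), while the ports have already been converted with
 * ntohs().  The function name is an illustrative assumption and only
 * the leading columns of a line are parsed.
 */
#include <stdio.h>
#include <stdint.h>

static int parse_tcp6_local(const char *line, uint32_t addr[4],
			    unsigned int *port)
{
	int sl;

	/* e.g. "   0: 00000000000000000000000001000000:1F90 ..." */
	return sscanf(line, " %d: %8x%8x%8x%8x:%x",
		      &sl, &addr[0], &addr[1], &addr[2], &addr[3], port) == 6;
}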
2161
73cb88ec
AV
2162static const struct file_operations tcp6_afinfo_seq_fops = {
2163 .owner = THIS_MODULE,
2164 .open = tcp_seq_open,
2165 .read = seq_read,
2166 .llseek = seq_lseek,
2167 .release = seq_release_net
2168};
2169
1da177e4 2170static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1da177e4
LT
2171 .name = "tcp6",
2172 .family = AF_INET6,
73cb88ec 2173 .seq_fops = &tcp6_afinfo_seq_fops,
9427c4b3
DL
2174 .seq_ops = {
2175 .show = tcp6_seq_show,
2176 },
1da177e4
LT
2177};
2178
2c8c1e72 2179int __net_init tcp6_proc_init(struct net *net)
1da177e4 2180{
6f8b13bc 2181 return tcp_proc_register(net, &tcp6_seq_afinfo);
1da177e4
LT
2182}
2183
6f8b13bc 2184void tcp6_proc_exit(struct net *net)
1da177e4 2185{
6f8b13bc 2186 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1da177e4
LT
2187}
2188#endif
2189
2190struct proto tcpv6_prot = {
2191 .name = "TCPv6",
2192 .owner = THIS_MODULE,
2193 .close = tcp_close,
2194 .connect = tcp_v6_connect,
2195 .disconnect = tcp_disconnect,
463c84b9 2196 .accept = inet_csk_accept,
1da177e4
LT
2197 .ioctl = tcp_ioctl,
2198 .init = tcp_v6_init_sock,
2199 .destroy = tcp_v6_destroy_sock,
2200 .shutdown = tcp_shutdown,
2201 .setsockopt = tcp_setsockopt,
2202 .getsockopt = tcp_getsockopt,
1da177e4 2203 .recvmsg = tcp_recvmsg,
7ba42910
CG
2204 .sendmsg = tcp_sendmsg,
2205 .sendpage = tcp_sendpage,
1da177e4
LT
2206 .backlog_rcv = tcp_v6_do_rcv,
2207 .hash = tcp_v6_hash,
ab1e0a13
ACM
2208 .unhash = inet_unhash,
2209 .get_port = inet_csk_get_port,
1da177e4
LT
2210 .enter_memory_pressure = tcp_enter_memory_pressure,
2211 .sockets_allocated = &tcp_sockets_allocated,
2212 .memory_allocated = &tcp_memory_allocated,
2213 .memory_pressure = &tcp_memory_pressure,
0a5578cf 2214 .orphan_count = &tcp_orphan_count,
1da177e4
LT
2215 .sysctl_mem = sysctl_tcp_mem,
2216 .sysctl_wmem = sysctl_tcp_wmem,
2217 .sysctl_rmem = sysctl_tcp_rmem,
2218 .max_header = MAX_TCP_HEADER,
2219 .obj_size = sizeof(struct tcp6_sock),
3ab5aee7 2220 .slab_flags = SLAB_DESTROY_BY_RCU,
6d6ee43e 2221 .twsk_prot = &tcp6_timewait_sock_ops,
60236fdd 2222 .rsk_prot = &tcp6_request_sock_ops,
39d8cda7 2223 .h.hashinfo = &tcp_hashinfo,
7ba42910 2224 .no_autobind = true,
543d9cfe
ACM
2225#ifdef CONFIG_COMPAT
2226 .compat_setsockopt = compat_tcp_setsockopt,
2227 .compat_getsockopt = compat_tcp_getsockopt,
2228#endif
1da177e4
LT
2229};
2230
41135cc8 2231static const struct inet6_protocol tcpv6_protocol = {
1da177e4
LT
2232 .handler = tcp_v6_rcv,
2233 .err_handler = tcp_v6_err,
a430a43d 2234 .gso_send_check = tcp_v6_gso_send_check,
adcfc7d0 2235 .gso_segment = tcp_tso_segment,
684f2176
HX
2236 .gro_receive = tcp6_gro_receive,
2237 .gro_complete = tcp6_gro_complete,
1da177e4
LT
2238 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2239};
2240
1da177e4
LT
2241static struct inet_protosw tcpv6_protosw = {
2242 .type = SOCK_STREAM,
2243 .protocol = IPPROTO_TCP,
2244 .prot = &tcpv6_prot,
2245 .ops = &inet6_stream_ops,
1da177e4 2246 .no_check = 0,
d83d8461
ACM
2247 .flags = INET_PROTOSW_PERMANENT |
2248 INET_PROTOSW_ICSK,
1da177e4
LT
2249};
2250
2c8c1e72 2251static int __net_init tcpv6_net_init(struct net *net)
93ec926b 2252{
5677242f
DL
2253 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2254 SOCK_RAW, IPPROTO_TCP, net);
93ec926b
DL
2255}
2256
2c8c1e72 2257static void __net_exit tcpv6_net_exit(struct net *net)
93ec926b 2258{
5677242f 2259 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
b099ce26
EB
2260}
2261
2c8c1e72 2262static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
b099ce26
EB
2263{
2264 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
93ec926b
DL
2265}
2266
2267static struct pernet_operations tcpv6_net_ops = {
b099ce26
EB
2268 .init = tcpv6_net_init,
2269 .exit = tcpv6_net_exit,
2270 .exit_batch = tcpv6_net_exit_batch,
93ec926b
DL
2271};
2272
7f4e4868 2273int __init tcpv6_init(void)
1da177e4 2274{
7f4e4868
DL
2275 int ret;
2276
2277 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2278 if (ret)
2279 goto out;
2280
1da177e4 2281 /* register inet6 protocol */
7f4e4868
DL
2282 ret = inet6_register_protosw(&tcpv6_protosw);
2283 if (ret)
2284 goto out_tcpv6_protocol;
2285
93ec926b 2286 ret = register_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2287 if (ret)
2288 goto out_tcpv6_protosw;
2289out:
2290 return ret;
ae0f7d5f 2291
7f4e4868
DL
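 /* Error unwind: undo the registrations in reverse order. */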
2292out_tcpv6_protosw:
 2293 inet6_unregister_protosw(&tcpv6_protosw);
 2294out_tcpv6_protocol:
 2295 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
 2296 goto out;
2297}
2298
09f7709f 2299void tcpv6_exit(void)
7f4e4868 2300{
93ec926b 2301 unregister_pernet_subsys(&tcpv6_net_ops);
7f4e4868
DL
2302 inet6_unregister_protosw(&tcpv6_protosw);
2303 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1da177e4 2304}