/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					to a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

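/* Cache the inbound route on the socket for the receive fast path.
 * dst_hold_safe() guards against a dst whose refcount already dropped
 * to zero; the ifindex and cookie saved here let later packets validate
 * the cached dst before reusing it.
 */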
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

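/* Initial sequence number for an active open: a keyed hash over the
 * {saddr, daddr, sport, dport} tuple (see net/core/secure_seq.c), so
 * ISNs are hard for off-path attackers to predict.
 */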
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

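/* Active open (connect()). Validates the destination (flow label,
 * link-local scope requirements), hands v4-mapped destinations to
 * tcp_v4_connect(), routes the flow, picks a source address and port,
 * and sends the SYN via tcp_connect().
 */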
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 * TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

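/* ICMPv6 error handler. Looks up the socket the quoted TCP segment
 * belongs to and reacts to the error: PMTU updates for PKT_TOOBIG,
 * rerouting on NDISC_REDIRECT, and hard or soft errors depending on
 * socket state and ownership.
 */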
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be the ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


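/* Build and transmit a SYN-ACK for a pending connection request,
 * routing it first if the caller did not supply a dst.
 */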
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

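/* Illustrative userspace sketch (not part of this file) of installing an
 * MD5 key for a peer via the TCP_MD5SIG socket option, which the parser
 * above consumes. "fd" is assumed to be a TCP socket; error handling is
 * omitted.
 *
 *	struct tcp_md5sig md5;
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	memset(&md5, 0, sizeof(md5));
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */

/* Feed the TCP pseudo-header (RFC 2460, section 8.1) and the TCP header
 * with a zeroed checksum field into the MD5 hash, as required by RFC 2385.
 */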
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

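/* Verify the MD5 signature option of an incoming segment against the
 * key configured for the peer, if any. Returns true when the segment
 * must be dropped: expected signature missing, unexpected signature
 * present, or signature mismatch.
 */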
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};

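/* Build and send a bare ACK or RST on the per-namespace control socket,
 * echoing back the addresses of the packet being answered. Used by the
 * reset, time-wait ACK and request-socket ACK paths below.
 */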
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass the control socket to ip6_dst_lookup_flow even when this is
	 * for a RST; the underlying function uses it to retrieve the
	 * network namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

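/* Send a RST in response to skb. When TCP MD5 is in use the reset must
 * be signed, so if we have no established socket we search the listener
 * on the remote port for a matching key before answering.
 */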
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the MD5 key through
		 * the listening socket. We do not lose security here: the
		 * incoming packet is checked against the MD5 hash of the key
		 * we find, and no RST is generated if the hash does not match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}


static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}

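/* Create the child socket once the handshake completes (or is completed
 * early by a Fast Open ACK). The v4-mapped case delegates to
 * tcp_v4_syn_recv_sock() and then switches the child over to the mapped
 * af_ops variants.
 */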
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = tcp_v6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever, but we
	   do one more thing here: reattach optmem to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				skb_set_owner_r(newnp->pktoptions, newsk);
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: the socket is IPv6, an IPv4 packet arrives,
	   goes to the IPv4 receive handler and is backlogged.
	   From the backlog it always comes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   can do this without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   the options received in the last packet enqueued
	   by tcp. Feel free to propose a better solution.
	   --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* What is going on here?

	   1. The skb was enqueued by tcp.
	   2. The skb was added to the tail of the read queue, not out of order.
	   3. The socket is not in a passive state.
	   4. Finally, it really contains options that the user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move the header back to the beginning if
	 * xfrm6_policy_check() and tcp_v6_fill_cb() are going to be
	 * called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

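/* Main receive entry point, called from the IPv6 protocol handler.
 * Validates the header and checksum, looks the segment up in the
 * established and listener tables, and either processes it directly,
 * backlogs it to the owner, or handles the TIME_WAIT and NEW_SYN_RECV
 * states specially.
 */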
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 * Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb),
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

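/* Early demux: look the packet up in the established table while it is
 * still in the IP layer, so the socket's cached rx dst can be attached
 * and a full route lookup skipped.
 */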
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *	 sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.clear_sk		= tcp_v6_clear_sk,
	.diag_destroy		= tcp_abort,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

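/* Create the per-namespace control socket used by tcp_v6_send_response()
 * to emit RSTs and ACKs that have no local socket of their own.
 */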
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}