net/ipv6/tcp_ipv6.c
1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on:
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/busy_poll.h>
65
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
68
69 #include <crypto/hash.h>
70 #include <linux/scatterlist.h>
71
72 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
74 struct request_sock *req);
75
76 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
77
78 static const struct inet_connection_sock_af_ops ipv6_mapped;
79 static const struct inet_connection_sock_af_ops ipv6_specific;
80 #ifdef CONFIG_TCP_MD5SIG
81 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
83 #else
84 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
85 const struct in6_addr *addr)
86 {
87 return NULL;
88 }
89 #endif
90
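/* Cache the incoming dst (plus its ifindex and route cookie) on the
 * socket, so the established-state fast path and early demux can
 * revalidate and reuse it without a fresh route lookup.
 */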
91 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
92 {
93 struct dst_entry *dst = skb_dst(skb);
94
95 if (dst && dst_hold_safe(dst)) {
96 const struct rt6_info *rt = (const struct rt6_info *)dst;
97
98 sk->sk_rx_dst = dst;
99 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
100 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
101 }
102 }
103
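/* Derive the initial sequence number for an active open from the
 * {saddr, daddr, sport, dport} tuple via a keyed secure hash.
 */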
104 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
105 {
106 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
107 ipv6_hdr(skb)->saddr.s6_addr32,
108 tcp_hdr(skb)->dest,
109 tcp_hdr(skb)->source);
110 }
111
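/* Active open. Validates the destination address, falls back to
 * tcp_v4_connect() for v4-mapped destinations, routes the flow and
 * chooses a source address, then hashes the socket and sends the SYN.
 */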
112 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
113 int addr_len)
114 {
115 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
116 struct inet_sock *inet = inet_sk(sk);
117 struct inet_connection_sock *icsk = inet_csk(sk);
118 struct ipv6_pinfo *np = inet6_sk(sk);
119 struct tcp_sock *tp = tcp_sk(sk);
120 struct in6_addr *saddr = NULL, *final_p, final;
121 struct ipv6_txoptions *opt;
122 struct flowi6 fl6;
123 struct dst_entry *dst;
124 int addr_type;
125 int err;
126
127 if (addr_len < SIN6_LEN_RFC2133)
128 return -EINVAL;
129
130 if (usin->sin6_family != AF_INET6)
131 return -EAFNOSUPPORT;
132
133 memset(&fl6, 0, sizeof(fl6));
134
135 if (np->sndflow) {
136 fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
137 IP6_ECN_flow_init(fl6.flowlabel);
138 if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
139 struct ip6_flowlabel *flowlabel;
140 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
141 if (!flowlabel)
142 return -EINVAL;
143 fl6_sock_release(flowlabel);
144 }
145 }
146
147 /*
148 * connect() to INADDR_ANY means loopback (BSD'ism).
149 */
150
151 if (ipv6_addr_any(&usin->sin6_addr))
152 usin->sin6_addr.s6_addr[15] = 0x1;
153
154 addr_type = ipv6_addr_type(&usin->sin6_addr);
155
156 if (addr_type & IPV6_ADDR_MULTICAST)
157 return -ENETUNREACH;
158
159 if (addr_type & IPV6_ADDR_LINKLOCAL) {
160 if (addr_len >= sizeof(struct sockaddr_in6) &&
161 usin->sin6_scope_id) {
162 /* If interface is set while binding, indices
163 * must coincide.
164 */
165 if (sk->sk_bound_dev_if &&
166 sk->sk_bound_dev_if != usin->sin6_scope_id)
167 return -EINVAL;
168
169 sk->sk_bound_dev_if = usin->sin6_scope_id;
170 }
171
172 /* Connect to link-local address requires an interface */
173 if (!sk->sk_bound_dev_if)
174 return -EINVAL;
175 }
176
177 if (tp->rx_opt.ts_recent_stamp &&
178 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
179 tp->rx_opt.ts_recent = 0;
180 tp->rx_opt.ts_recent_stamp = 0;
181 tp->write_seq = 0;
182 }
183
184 sk->sk_v6_daddr = usin->sin6_addr;
185 np->flow_label = fl6.flowlabel;
186
187 /*
188 * TCP over IPv4
189 */
190
191 if (addr_type == IPV6_ADDR_MAPPED) {
192 u32 exthdrlen = icsk->icsk_ext_hdr_len;
193 struct sockaddr_in sin;
194
195 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
196
197 if (__ipv6_only_sock(sk))
198 return -ENETUNREACH;
199
200 sin.sin_family = AF_INET;
201 sin.sin_port = usin->sin6_port;
202 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
203
204 icsk->icsk_af_ops = &ipv6_mapped;
205 sk->sk_backlog_rcv = tcp_v4_do_rcv;
206 #ifdef CONFIG_TCP_MD5SIG
207 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
208 #endif
209
210 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
211
212 if (err) {
213 icsk->icsk_ext_hdr_len = exthdrlen;
214 icsk->icsk_af_ops = &ipv6_specific;
215 sk->sk_backlog_rcv = tcp_v6_do_rcv;
216 #ifdef CONFIG_TCP_MD5SIG
217 tp->af_specific = &tcp_sock_ipv6_specific;
218 #endif
219 goto failure;
220 }
221 np->saddr = sk->sk_v6_rcv_saddr;
222
223 return err;
224 }
225
226 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
227 saddr = &sk->sk_v6_rcv_saddr;
228
229 fl6.flowi6_proto = IPPROTO_TCP;
230 fl6.daddr = sk->sk_v6_daddr;
231 fl6.saddr = saddr ? *saddr : np->saddr;
232 fl6.flowi6_oif = sk->sk_bound_dev_if;
233 fl6.flowi6_mark = sk->sk_mark;
234 fl6.fl6_dport = usin->sin6_port;
235 fl6.fl6_sport = inet->inet_sport;
236
237 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
238 final_p = fl6_update_dst(&fl6, opt, &final);
239
240 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
241
242 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
243 if (IS_ERR(dst)) {
244 err = PTR_ERR(dst);
245 goto failure;
246 }
247
248 if (!saddr) {
249 saddr = &fl6.saddr;
250 sk->sk_v6_rcv_saddr = *saddr;
251 }
252
253 /* set the source address */
254 np->saddr = *saddr;
255 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
256
257 sk->sk_gso_type = SKB_GSO_TCPV6;
258 ip6_dst_store(sk, dst, NULL, NULL);
259
260 if (tcp_death_row.sysctl_tw_recycle &&
261 !tp->rx_opt.ts_recent_stamp &&
262 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
263 tcp_fetch_timewait_stamp(sk, dst);
264
265 icsk->icsk_ext_hdr_len = 0;
266 if (opt)
267 icsk->icsk_ext_hdr_len = opt->opt_flen +
268 opt->opt_nflen;
269
270 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
271
272 inet->inet_dport = usin->sin6_port;
273
274 tcp_set_state(sk, TCP_SYN_SENT);
275 err = inet6_hash_connect(&tcp_death_row, sk);
276 if (err)
277 goto late_failure;
278
279 sk_set_txhash(sk);
280
281 if (!tp->write_seq && likely(!tp->repair))
282 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
283 sk->sk_v6_daddr.s6_addr32,
284 inet->inet_sport,
285 inet->inet_dport);
286
287 err = tcp_connect(sk);
288 if (err)
289 goto late_failure;
290
291 return 0;
292
293 late_failure:
294 tcp_set_state(sk, TCP_CLOSE);
295 __sk_dst_reset(sk);
296 failure:
297 inet->inet_dport = 0;
298 sk->sk_route_caps = 0;
299 return err;
300 }
301
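/* Deferred handling of an ICMPV6_PKT_TOOBIG: refresh the cached path
 * MTU and, if our MSS estimate is now too large, shrink it and
 * retransmit the segments that no longer fit.
 */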
302 static void tcp_v6_mtu_reduced(struct sock *sk)
303 {
304 struct dst_entry *dst;
305
306 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
307 return;
308
309 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
310 if (!dst)
311 return;
312
313 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
314 tcp_sync_mss(sk, dst_mtu(dst));
315 tcp_simple_retransmit(sk);
316 }
317 }
318
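/* ICMPv6 error handler. Maps the reported error back onto the socket
 * that sent the offending segment, with special cases for TIME_WAIT
 * and NEW_SYN_RECV lookups, NDISC redirects, PMTU updates and hard
 * errors during the handshake.
 */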
319 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
320 u8 type, u8 code, int offset, __be32 info)
321 {
322 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
323 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
324 struct net *net = dev_net(skb->dev);
325 struct request_sock *fastopen;
326 struct ipv6_pinfo *np;
327 struct tcp_sock *tp;
328 __u32 seq, snd_una;
329 struct sock *sk;
330 bool fatal;
331 int err;
332
333 sk = __inet6_lookup_established(net, &tcp_hashinfo,
334 &hdr->daddr, th->dest,
335 &hdr->saddr, ntohs(th->source),
336 skb->dev->ifindex);
337
338 if (!sk) {
339 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
340 ICMP6_MIB_INERRORS);
341 return;
342 }
343
344 if (sk->sk_state == TCP_TIME_WAIT) {
345 inet_twsk_put(inet_twsk(sk));
346 return;
347 }
348 seq = ntohl(th->seq);
349 fatal = icmpv6_err_convert(type, code, &err);
350 if (sk->sk_state == TCP_NEW_SYN_RECV)
351 return tcp_req_err(sk, seq, fatal);
352
353 bh_lock_sock(sk);
354 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
355 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
356
357 if (sk->sk_state == TCP_CLOSE)
358 goto out;
359
360 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
361 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
362 goto out;
363 }
364
365 tp = tcp_sk(sk);
366 /* XXX (TFO) - tp->snd_una should be the ISN (see tcp_create_openreq_child()) */
367 fastopen = tp->fastopen_rsk;
368 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
369 if (sk->sk_state != TCP_LISTEN &&
370 !between(seq, snd_una, tp->snd_nxt)) {
371 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
372 goto out;
373 }
374
375 np = inet6_sk(sk);
376
377 if (type == NDISC_REDIRECT) {
378 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
379
380 if (dst)
381 dst->ops->redirect(dst, sk, skb);
382 goto out;
383 }
384
385 if (type == ICMPV6_PKT_TOOBIG) {
386 /* We are not interested in TCP_LISTEN and open_requests
387 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
388 * they should go through unfragmented).
389 */
390 if (sk->sk_state == TCP_LISTEN)
391 goto out;
392
393 if (!ip6_sk_accept_pmtu(sk))
394 goto out;
395
396 tp->mtu_info = ntohl(info);
397 if (!sock_owned_by_user(sk))
398 tcp_v6_mtu_reduced(sk);
399 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
400 &tp->tsq_flags))
401 sock_hold(sk);
402 goto out;
403 }
404
405
406 /* Might be for a request_sock */
407 switch (sk->sk_state) {
408 case TCP_SYN_SENT:
409 case TCP_SYN_RECV:
410 /* Only in fast or simultaneous open. If a fast open socket is
411 * already accepted it is treated as a connected one below.
412 */
413 if (fastopen && !fastopen->sk)
414 break;
415
416 if (!sock_owned_by_user(sk)) {
417 sk->sk_err = err;
418 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
419
420 tcp_done(sk);
421 } else
422 sk->sk_err_soft = err;
423 goto out;
424 }
425
426 if (!sock_owned_by_user(sk) && np->recverr) {
427 sk->sk_err = err;
428 sk->sk_error_report(sk);
429 } else
430 sk->sk_err_soft = err;
431
432 out:
433 bh_unlock_sock(sk);
434 sock_put(sk);
435 }
436
437
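/* Build and send a SYN-ACK for the given request, routing it first
 * when the caller did not supply a dst. Returns a net_xmit_*() style
 * error code.
 */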
438 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
439 struct flowi *fl,
440 struct request_sock *req,
441 struct tcp_fastopen_cookie *foc,
442 enum tcp_synack_type synack_type)
443 {
444 struct inet_request_sock *ireq = inet_rsk(req);
445 struct ipv6_pinfo *np = inet6_sk(sk);
446 struct ipv6_txoptions *opt;
447 struct flowi6 *fl6 = &fl->u.ip6;
448 struct sk_buff *skb;
449 int err = -ENOMEM;
450
451 /* First, grab a route. */
452 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
453 IPPROTO_TCP)) == NULL)
454 goto done;
455
456 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
457
458 if (skb) {
459 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
460 &ireq->ir_v6_rmt_addr);
461
462 fl6->daddr = ireq->ir_v6_rmt_addr;
463 if (np->repflow && ireq->pktopts)
464 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
465
466 rcu_read_lock();
467 opt = ireq->ipv6_opt;
468 if (!opt)
469 opt = rcu_dereference(np->opt);
470 err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
471 rcu_read_unlock();
472 err = net_xmit_eval(err);
473 }
474
475 done:
476 return err;
477 }
478
479
480 static void tcp_v6_reqsk_destructor(struct request_sock *req)
481 {
482 kfree(inet_rsk(req)->ipv6_opt);
483 kfree_skb(inet_rsk(req)->pktopts);
484 }
485
486 #ifdef CONFIG_TCP_MD5SIG
487 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
488 const struct in6_addr *addr)
489 {
490 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
491 }
492
493 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
494 const struct sock *addr_sk)
495 {
496 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
497 }
498
499 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
500 int optlen)
501 {
502 struct tcp_md5sig cmd;
503 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
504
505 if (optlen < sizeof(cmd))
506 return -EINVAL;
507
508 if (copy_from_user(&cmd, optval, sizeof(cmd)))
509 return -EFAULT;
510
511 if (sin6->sin6_family != AF_INET6)
512 return -EINVAL;
513
514 if (!cmd.tcpm_keylen) {
515 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
516 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
517 AF_INET);
518 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
519 AF_INET6);
520 }
521
522 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
523 return -EINVAL;
524
525 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
526 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
527 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
528
529 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
530 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
531 }
532
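/* Feed the IPv6 TCP pseudo-header (as used for the checksum in RFC 2460)
 * into the MD5 transform; the TCP header, key and payload are hashed
 * separately by the callers below.
 */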
533 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
534 const struct in6_addr *daddr,
535 const struct in6_addr *saddr, int nbytes)
536 {
537 struct tcp6_pseudohdr *bp;
538 struct scatterlist sg;
539
540 bp = &hp->md5_blk.ip6;
541 /* 1. TCP pseudo-header (RFC2460) */
542 bp->saddr = *saddr;
543 bp->daddr = *daddr;
544 bp->protocol = cpu_to_be32(IPPROTO_TCP);
545 bp->len = cpu_to_be32(nbytes);
546
547 sg_init_one(&sg, bp, sizeof(*bp));
548 ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
549 return crypto_ahash_update(hp->md5_req);
550 }
551
552 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
553 const struct in6_addr *daddr, struct in6_addr *saddr,
554 const struct tcphdr *th)
555 {
556 struct tcp_md5sig_pool *hp;
557 struct ahash_request *req;
558
559 hp = tcp_get_md5sig_pool();
560 if (!hp)
561 goto clear_hash_noput;
562 req = hp->md5_req;
563
564 if (crypto_ahash_init(req))
565 goto clear_hash;
566 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
567 goto clear_hash;
568 if (tcp_md5_hash_header(hp, th))
569 goto clear_hash;
570 if (tcp_md5_hash_key(hp, key))
571 goto clear_hash;
572 ahash_request_set_crypt(req, NULL, md5_hash, 0);
573 if (crypto_ahash_final(req))
574 goto clear_hash;
575
576 tcp_put_md5sig_pool();
577 return 0;
578
579 clear_hash:
580 tcp_put_md5sig_pool();
581 clear_hash_noput:
582 memset(md5_hash, 0, 16);
583 return 1;
584 }
585
586 static int tcp_v6_md5_hash_skb(char *md5_hash,
587 const struct tcp_md5sig_key *key,
588 const struct sock *sk,
589 const struct sk_buff *skb)
590 {
591 const struct in6_addr *saddr, *daddr;
592 struct tcp_md5sig_pool *hp;
593 struct ahash_request *req;
594 const struct tcphdr *th = tcp_hdr(skb);
595
596 if (sk) { /* valid for establish/request sockets */
597 saddr = &sk->sk_v6_rcv_saddr;
598 daddr = &sk->sk_v6_daddr;
599 } else {
600 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
601 saddr = &ip6h->saddr;
602 daddr = &ip6h->daddr;
603 }
604
605 hp = tcp_get_md5sig_pool();
606 if (!hp)
607 goto clear_hash_noput;
608 req = hp->md5_req;
609
610 if (crypto_ahash_init(req))
611 goto clear_hash;
612
613 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
614 goto clear_hash;
615 if (tcp_md5_hash_header(hp, th))
616 goto clear_hash;
617 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
618 goto clear_hash;
619 if (tcp_md5_hash_key(hp, key))
620 goto clear_hash;
621 ahash_request_set_crypt(req, NULL, md5_hash, 0);
622 if (crypto_ahash_final(req))
623 goto clear_hash;
624
625 tcp_put_md5sig_pool();
626 return 0;
627
628 clear_hash:
629 tcp_put_md5sig_pool();
630 clear_hash_noput:
631 memset(md5_hash, 0, 16);
632 return 1;
633 }
634
635 #endif
636
637 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
638 const struct sk_buff *skb)
639 {
640 #ifdef CONFIG_TCP_MD5SIG
641 const __u8 *hash_location = NULL;
642 struct tcp_md5sig_key *hash_expected;
643 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
644 const struct tcphdr *th = tcp_hdr(skb);
645 int genhash;
646 u8 newhash[16];
647
648 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
649 hash_location = tcp_parse_md5sig_option(th);
650
651 /* We've parsed the options - do we have a hash? */
652 if (!hash_expected && !hash_location)
653 return false;
654
655 if (hash_expected && !hash_location) {
656 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
657 return true;
658 }
659
660 if (!hash_expected && hash_location) {
661 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
662 return true;
663 }
664
665 /* check the signature */
666 genhash = tcp_v6_md5_hash_skb(newhash,
667 hash_expected,
668 NULL, skb);
669
670 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
671 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
672 genhash ? "failed" : "mismatch",
673 &ip6h->saddr, ntohs(th->source),
674 &ip6h->daddr, ntohs(th->dest));
675 return true;
676 }
677 #endif
678 return false;
679 }
680
681 static void tcp_v6_init_req(struct request_sock *req,
682 const struct sock *sk_listener,
683 struct sk_buff *skb)
684 {
685 struct inet_request_sock *ireq = inet_rsk(req);
686 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
687
688 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
689 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
690
691 /* So that link locals have meaning */
692 if (!sk_listener->sk_bound_dev_if &&
693 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
694 ireq->ir_iif = tcp_v6_iif(skb);
695
696 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
697 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
698 np->rxopt.bits.rxinfo ||
699 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
700 np->rxopt.bits.rxohlim || np->repflow)) {
701 atomic_inc(&skb->users);
702 ireq->pktopts = skb;
703 }
704 }
705
706 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
707 struct flowi *fl,
708 const struct request_sock *req,
709 bool *strict)
710 {
711 if (strict)
712 *strict = true;
713 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
714 }
715
716 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
717 .family = AF_INET6,
718 .obj_size = sizeof(struct tcp6_request_sock),
719 .rtx_syn_ack = tcp_rtx_synack,
720 .send_ack = tcp_v6_reqsk_send_ack,
721 .destructor = tcp_v6_reqsk_destructor,
722 .send_reset = tcp_v6_send_reset,
723 .syn_ack_timeout = tcp_syn_ack_timeout,
724 };
725
726 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
727 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
728 sizeof(struct ipv6hdr),
729 #ifdef CONFIG_TCP_MD5SIG
730 .req_md5_lookup = tcp_v6_md5_lookup,
731 .calc_md5_hash = tcp_v6_md5_hash_skb,
732 #endif
733 .init_req = tcp_v6_init_req,
734 #ifdef CONFIG_SYN_COOKIES
735 .cookie_init_seq = cookie_v6_init_sequence,
736 #endif
737 .route_req = tcp_v6_route_req,
738 .init_seq = tcp_v6_init_sequence,
739 .send_synack = tcp_v6_send_synack,
740 };
741
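/* Build and send a bare RST or ACK on the per-netns control socket,
 * with the addresses of the packet being answered swapped into place.
 * Shared by tcp_v6_send_reset() and tcp_v6_send_ack() below.
 */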
742 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
743 u32 ack, u32 win, u32 tsval, u32 tsecr,
744 int oif, struct tcp_md5sig_key *key, int rst,
745 u8 tclass, u32 label)
746 {
747 const struct tcphdr *th = tcp_hdr(skb);
748 struct tcphdr *t1;
749 struct sk_buff *buff;
750 struct flowi6 fl6;
751 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
752 struct sock *ctl_sk = net->ipv6.tcp_sk;
753 unsigned int tot_len = sizeof(struct tcphdr);
754 struct dst_entry *dst;
755 __be32 *topt;
756
757 if (tsecr)
758 tot_len += TCPOLEN_TSTAMP_ALIGNED;
759 #ifdef CONFIG_TCP_MD5SIG
760 if (key)
761 tot_len += TCPOLEN_MD5SIG_ALIGNED;
762 #endif
763
764 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
765 GFP_ATOMIC);
766 if (!buff)
767 return;
768
769 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
770
771 t1 = (struct tcphdr *) skb_push(buff, tot_len);
772 skb_reset_transport_header(buff);
773
774 /* Swap the send and the receive. */
775 memset(t1, 0, sizeof(*t1));
776 t1->dest = th->source;
777 t1->source = th->dest;
778 t1->doff = tot_len / 4;
779 t1->seq = htonl(seq);
780 t1->ack_seq = htonl(ack);
781 t1->ack = !rst || !th->ack;
782 t1->rst = rst;
783 t1->window = htons(win);
784
785 topt = (__be32 *)(t1 + 1);
786
787 if (tsecr) {
788 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
789 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
790 *topt++ = htonl(tsval);
791 *topt++ = htonl(tsecr);
792 }
793
794 #ifdef CONFIG_TCP_MD5SIG
795 if (key) {
796 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
797 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
798 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
799 &ipv6_hdr(skb)->saddr,
800 &ipv6_hdr(skb)->daddr, t1);
801 }
802 #endif
803
804 memset(&fl6, 0, sizeof(fl6));
805 fl6.daddr = ipv6_hdr(skb)->saddr;
806 fl6.saddr = ipv6_hdr(skb)->daddr;
807 fl6.flowlabel = label;
808
809 buff->ip_summed = CHECKSUM_PARTIAL;
810 buff->csum = 0;
811
812 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
813
814 fl6.flowi6_proto = IPPROTO_TCP;
815 if (rt6_need_strict(&fl6.daddr) && !oif)
816 fl6.flowi6_oif = tcp_v6_iif(skb);
817 else {
818 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
819 oif = skb->skb_iif;
820
821 fl6.flowi6_oif = oif;
822 }
823
824 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
825 fl6.fl6_dport = t1->dest;
826 fl6.fl6_sport = t1->source;
827 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
828
829 /* Pass a socket to ip6_dst_lookup even when the reply is a RST;
830 * the underlying function will use it to retrieve the network
831 * namespace.
832 */
833 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
834 if (!IS_ERR(dst)) {
835 skb_dst_set(buff, dst);
836 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
837 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
838 if (rst)
839 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
840 return;
841 }
842
843 kfree_skb(buff);
844 }
845
846 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
847 {
848 const struct tcphdr *th = tcp_hdr(skb);
849 u32 seq = 0, ack_seq = 0;
850 struct tcp_md5sig_key *key = NULL;
851 #ifdef CONFIG_TCP_MD5SIG
852 const __u8 *hash_location = NULL;
853 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
854 unsigned char newhash[16];
855 int genhash;
856 struct sock *sk1 = NULL;
857 #endif
858 int oif;
859
860 if (th->rst)
861 return;
862
863 /* If sk is not NULL, it means we did a successful lookup and the
864 * incoming route was correct. The prequeue might have dropped our dst.
865 */
866 if (!sk && !ipv6_unicast_destination(skb))
867 return;
868
869 #ifdef CONFIG_TCP_MD5SIG
870 rcu_read_lock();
871 hash_location = tcp_parse_md5sig_option(th);
872 if (sk && sk_fullsock(sk)) {
873 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
874 } else if (hash_location) {
875 /*
876 * The active side is gone. Try to find the listening socket through
877 * the source port, and then the md5 key through that listening socket.
878 * We do not lose security here: the incoming packet is checked
879 * against the md5 hash of the key we find, and no RST is generated
880 * if the hash doesn't match.
881 */
882 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
883 &tcp_hashinfo, NULL, 0,
884 &ipv6h->saddr,
885 th->source, &ipv6h->daddr,
886 ntohs(th->source), tcp_v6_iif(skb));
887 if (!sk1)
888 goto out;
889
890 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
891 if (!key)
892 goto out;
893
894 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
895 if (genhash || memcmp(hash_location, newhash, 16) != 0)
896 goto out;
897 }
898 #endif
899
900 if (th->ack)
901 seq = ntohl(th->ack_seq);
902 else
903 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
904 (th->doff << 2);
905
906 oif = sk ? sk->sk_bound_dev_if : 0;
907 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
908
909 #ifdef CONFIG_TCP_MD5SIG
910 out:
911 rcu_read_unlock();
912 #endif
913 }
914
915 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
916 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
917 struct tcp_md5sig_key *key, u8 tclass,
918 u32 label)
919 {
920 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
921 tclass, label);
922 }
923
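/* Answer a segment that matched a TIME_WAIT socket with an ACK carrying
 * the timewait sequence numbers and timestamp state.
 */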
924 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
925 {
926 struct inet_timewait_sock *tw = inet_twsk(sk);
927 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
928
929 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
930 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
931 tcp_time_stamp + tcptw->tw_ts_offset,
932 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
933 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
934
935 inet_twsk_put(tw);
936 }
937
938 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
939 struct request_sock *req)
940 {
941 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
942 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
943 */
944 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
945 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
946 tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
947 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
948 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
949 0, 0);
950 }
951
952
953 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
954 {
955 #ifdef CONFIG_SYN_COOKIES
956 const struct tcphdr *th = tcp_hdr(skb);
957
958 if (!th->syn)
959 sk = cookie_v6_check(sk, skb);
960 #endif
961 return sk;
962 }
963
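/* Passive open entry point. SYNs received on a v4-mapped socket still
 * carry an ETH_P_IP protocol and are diverted to tcp_v4_conn_request().
 */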
964 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
965 {
966 if (skb->protocol == htons(ETH_P_IP))
967 return tcp_v4_conn_request(sk, skb);
968
969 if (!ipv6_unicast_destination(skb))
970 goto drop;
971
972 return tcp_conn_request(&tcp6_request_sock_ops,
973 &tcp_request_sock_ipv6_ops, sk, skb);
974
975 drop:
976 tcp_listendrop(sk);
977 return 0; /* don't send reset */
978 }
979
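/* Create the child socket once a valid ACK completes the handshake.
 * The ETH_P_IP branch reuses tcp_v4_syn_recv_sock() and then repairs
 * the af_ops and IPv6 state on the new socket.
 */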
980 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
981 struct request_sock *req,
982 struct dst_entry *dst,
983 struct request_sock *req_unhash,
984 bool *own_req)
985 {
986 struct inet_request_sock *ireq;
987 struct ipv6_pinfo *newnp;
988 const struct ipv6_pinfo *np = inet6_sk(sk);
989 struct ipv6_txoptions *opt;
990 struct tcp6_sock *newtcp6sk;
991 struct inet_sock *newinet;
992 struct tcp_sock *newtp;
993 struct sock *newsk;
994 #ifdef CONFIG_TCP_MD5SIG
995 struct tcp_md5sig_key *key;
996 #endif
997 struct flowi6 fl6;
998
999 if (skb->protocol == htons(ETH_P_IP)) {
1000 /*
1001 * v6 mapped
1002 */
1003
1004 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1005 req_unhash, own_req);
1006
1007 if (!newsk)
1008 return NULL;
1009
1010 newtcp6sk = (struct tcp6_sock *)newsk;
1011 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1012
1013 newinet = inet_sk(newsk);
1014 newnp = inet6_sk(newsk);
1015 newtp = tcp_sk(newsk);
1016
1017 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1018
1019 newnp->saddr = newsk->sk_v6_rcv_saddr;
1020
1021 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1022 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1023 #ifdef CONFIG_TCP_MD5SIG
1024 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1025 #endif
1026
1027 newnp->ipv6_ac_list = NULL;
1028 newnp->ipv6_fl_list = NULL;
1029 newnp->pktoptions = NULL;
1030 newnp->opt = NULL;
1031 newnp->mcast_oif = tcp_v6_iif(skb);
1032 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1033 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1034 if (np->repflow)
1035 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1036
1037 /*
1038 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1039 * here, tcp_create_openreq_child now does this for us, see the comment in
1040 * that function for the gory details. -acme
1041 */
1042
1043 /* This is a tricky place. Until this moment the IPv4 tcp code
1044 worked with the IPv6 icsk.icsk_af_ops.
1045 Sync it now.
1046 */
1047 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1048
1049 return newsk;
1050 }
1051
1052 ireq = inet_rsk(req);
1053
1054 if (sk_acceptq_is_full(sk))
1055 goto out_overflow;
1056
1057 if (!dst) {
1058 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1059 if (!dst)
1060 goto out;
1061 }
1062
1063 newsk = tcp_create_openreq_child(sk, req, skb);
1064 if (!newsk)
1065 goto out_nonewsk;
1066
1067 /*
1068 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1069 * count here, tcp_create_openreq_child now does this for us, see the
1070 * comment in that function for the gory details. -acme
1071 */
1072
1073 newsk->sk_gso_type = SKB_GSO_TCPV6;
1074 ip6_dst_store(newsk, dst, NULL, NULL);
1075 inet6_sk_rx_dst_set(newsk, skb);
1076
1077 newtcp6sk = (struct tcp6_sock *)newsk;
1078 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1079
1080 newtp = tcp_sk(newsk);
1081 newinet = inet_sk(newsk);
1082 newnp = inet6_sk(newsk);
1083
1084 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1085
1086 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1087 newnp->saddr = ireq->ir_v6_loc_addr;
1088 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1089 newsk->sk_bound_dev_if = ireq->ir_iif;
1090
1091 /* Now IPv6 options...
1092
1093 First: no IPv4 options.
1094 */
1095 newinet->inet_opt = NULL;
1096 newnp->ipv6_ac_list = NULL;
1097 newnp->ipv6_fl_list = NULL;
1098
1099 /* Clone RX bits */
1100 newnp->rxopt.all = np->rxopt.all;
1101
1102 newnp->pktoptions = NULL;
1103 newnp->opt = NULL;
1104 newnp->mcast_oif = tcp_v6_iif(skb);
1105 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1106 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1107 if (np->repflow)
1108 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1109
1110 /* Clone native IPv6 options from listening socket (if any)
1111
1112 Yes, keeping a reference count would be much more clever,
1113 but we do one more thing here: reattach optmem
1114 to newsk.
1115 */
1116 opt = ireq->ipv6_opt;
1117 if (!opt)
1118 opt = rcu_dereference(np->opt);
1119 if (opt) {
1120 opt = ipv6_dup_options(newsk, opt);
1121 RCU_INIT_POINTER(newnp->opt, opt);
1122 }
1123 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1124 if (opt)
1125 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1126 opt->opt_flen;
1127
1128 tcp_ca_openreq_child(newsk, dst);
1129
1130 tcp_sync_mss(newsk, dst_mtu(dst));
1131 newtp->advmss = dst_metric_advmss(dst);
1132 if (tcp_sk(sk)->rx_opt.user_mss &&
1133 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1134 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1135
1136 tcp_initialize_rcv_mss(newsk);
1137
1138 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1139 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1140
1141 #ifdef CONFIG_TCP_MD5SIG
1142 /* Copy over the MD5 key from the original socket */
1143 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1144 if (key) {
1145 /* We're using one, so create a matching key
1146 * on the newsk structure. If we fail to get
1147 * memory, then we end up not copying the key
1148 * across. Shucks.
1149 */
1150 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1151 AF_INET6, key->key, key->keylen,
1152 sk_gfp_mask(sk, GFP_ATOMIC));
1153 }
1154 #endif
1155
1156 if (__inet_inherit_port(sk, newsk) < 0) {
1157 inet_csk_prepare_forced_close(newsk);
1158 tcp_done(newsk);
1159 goto out;
1160 }
1161 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1162 if (*own_req) {
1163 tcp_move_syn(newtp, req);
1164
1165 /* Clone pktoptions received with SYN, if we own the req */
1166 if (ireq->pktopts) {
1167 newnp->pktoptions = skb_clone(ireq->pktopts,
1168 sk_gfp_mask(sk, GFP_ATOMIC));
1169 consume_skb(ireq->pktopts);
1170 ireq->pktopts = NULL;
1171 if (newnp->pktoptions)
1172 skb_set_owner_r(newnp->pktoptions, newsk);
1173 }
1174 }
1175
1176 return newsk;
1177
1178 out_overflow:
1179 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1180 out_nonewsk:
1181 dst_release(dst);
1182 out:
1183 tcp_listendrop(sk);
1184 return NULL;
1185 }
1186
1187 /* The socket must have its spinlock held when we get
1188 * here, unless it is a TCP_LISTEN socket.
1189 *
1190 * We have a potential double-lock case here, so even when
1191 * doing backlog processing we use the BH locking scheme.
1192 * This is because we cannot sleep with the original spinlock
1193 * held.
1194 */
1195 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1196 {
1197 struct ipv6_pinfo *np = inet6_sk(sk);
1198 struct tcp_sock *tp;
1199 struct sk_buff *opt_skb = NULL;
1200
1201 /* Imagine: socket is IPv6. IPv4 packet arrives,
1202 goes to IPv4 receive handler and backlogged.
1203 From backlog it always goes here. Kerboom...
1204 Fortunately, tcp_rcv_established()
1205 handles them correctly, but it is not the case with
1206 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1207 */
1208
1209 if (skb->protocol == htons(ETH_P_IP))
1210 return tcp_v4_do_rcv(sk, skb);
1211
1212 if (sk_filter(sk, skb))
1213 goto discard;
1214
1215 /*
1216 * socket locking is here for SMP purposes as backlog rcv
1217 * is currently called with bh processing disabled.
1218 */
1219
1220 /* Do Stevens' IPV6_PKTOPTIONS.
1221
1222 Yes, guys, it is the only place in our code where we
1223 can do it without affecting IPv4.
1224 The rest of the code is protocol independent,
1225 and I do not like the idea of uglifying IPv4.
1226 
1227 Actually, the whole idea behind IPV6_PKTOPTIONS
1228 looks not very well thought out. For now we latch the
1229 options received in the last packet enqueued
1230 by tcp. Feel free to propose a better solution.
1231 --ANK (980728)
1232 */
1233 if (np->rxopt.all)
1234 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1235
1236 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1237 struct dst_entry *dst = sk->sk_rx_dst;
1238
1239 sock_rps_save_rxhash(sk, skb);
1240 sk_mark_napi_id(sk, skb);
1241 if (dst) {
1242 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1243 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1244 dst_release(dst);
1245 sk->sk_rx_dst = NULL;
1246 }
1247 }
1248
1249 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1250 if (opt_skb)
1251 goto ipv6_pktoptions;
1252 return 0;
1253 }
1254
1255 if (tcp_checksum_complete(skb))
1256 goto csum_err;
1257
1258 if (sk->sk_state == TCP_LISTEN) {
1259 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1260
1261 if (!nsk)
1262 goto discard;
1263
1264 if (nsk != sk) {
1265 sock_rps_save_rxhash(nsk, skb);
1266 sk_mark_napi_id(nsk, skb);
1267 if (tcp_child_process(sk, nsk, skb))
1268 goto reset;
1269 if (opt_skb)
1270 __kfree_skb(opt_skb);
1271 return 0;
1272 }
1273 } else
1274 sock_rps_save_rxhash(sk, skb);
1275
1276 if (tcp_rcv_state_process(sk, skb))
1277 goto reset;
1278 if (opt_skb)
1279 goto ipv6_pktoptions;
1280 return 0;
1281
1282 reset:
1283 tcp_v6_send_reset(sk, skb);
1284 discard:
1285 if (opt_skb)
1286 __kfree_skb(opt_skb);
1287 kfree_skb(skb);
1288 return 0;
1289 csum_err:
1290 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1291 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1292 goto discard;
1293
1294
1295 ipv6_pktoptions:
1296 /* You may ask, what is this? We latch the options only if:
1297 
1298 1. skb was enqueued by tcp.
1299 2. skb is added to the tail of the read queue, rather than out of order.
1300 3. socket is not in a passive state.
1301 4. Finally, it really contains options that the user wants to receive.
1302 */
1303 tp = tcp_sk(sk);
1304 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1305 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1306 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1307 np->mcast_oif = tcp_v6_iif(opt_skb);
1308 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1309 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1310 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1311 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1312 if (np->repflow)
1313 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1314 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1315 skb_set_owner_r(opt_skb, sk);
1316 opt_skb = xchg(&np->pktoptions, opt_skb);
1317 } else {
1318 __kfree_skb(opt_skb);
1319 opt_skb = xchg(&np->pktoptions, NULL);
1320 }
1321 }
1322
1323 kfree_skb(opt_skb);
1324 return 0;
1325 }
1326
1327 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1328 const struct tcphdr *th)
1329 {
1330 /* This is tricky: we move IP6CB at its correct location into
1331 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1332 * _decode_session6() uses IP6CB().
1333 * barrier() makes sure compiler won't play aliasing games.
1334 */
1335 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1336 sizeof(struct inet6_skb_parm));
1337 barrier();
1338
1339 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1340 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1341 skb->len - th->doff*4);
1342 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1343 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1344 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1345 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1346 TCP_SKB_CB(skb)->sacked = 0;
1347 }
1348
1349 static void tcp_v6_restore_cb(struct sk_buff *skb)
1350 {
1351 /* We need to move header back to the beginning if xfrm6_policy_check()
1352 * and tcp_v6_fill_cb() are going to be called again.
1353 */
1354 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1355 sizeof(struct inet6_skb_parm));
1356 }
1357
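/* Main receive path, called in softirq context. Validates the header
 * and checksum, looks the segment up in the established/listener
 * tables (handling TIME_WAIT and NEW_SYN_RECV mini-sockets inline),
 * then processes it directly or queues it to the owning task's backlog.
 */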
1358 static int tcp_v6_rcv(struct sk_buff *skb)
1359 {
1360 const struct tcphdr *th;
1361 const struct ipv6hdr *hdr;
1362 bool refcounted;
1363 struct sock *sk;
1364 int ret;
1365 struct net *net = dev_net(skb->dev);
1366
1367 if (skb->pkt_type != PACKET_HOST)
1368 goto discard_it;
1369
1370 /*
1371 * Count it even if it's bad.
1372 */
1373 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1374
1375 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1376 goto discard_it;
1377
1378 th = (const struct tcphdr *)skb->data;
1379
1380 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1381 goto bad_packet;
1382 if (!pskb_may_pull(skb, th->doff*4))
1383 goto discard_it;
1384
1385 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1386 goto csum_error;
1387
1388 th = (const struct tcphdr *)skb->data;
1389 hdr = ipv6_hdr(skb);
1390
1391 lookup:
1392 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1393 th->source, th->dest, inet6_iif(skb),
1394 &refcounted);
1395 if (!sk)
1396 goto no_tcp_socket;
1397
1398 process:
1399 if (sk->sk_state == TCP_TIME_WAIT)
1400 goto do_time_wait;
1401
1402 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1403 struct request_sock *req = inet_reqsk(sk);
1404 struct sock *nsk;
1405
1406 sk = req->rsk_listener;
1407 tcp_v6_fill_cb(skb, hdr, th);
1408 if (tcp_v6_inbound_md5_hash(sk, skb)) {
1409 reqsk_put(req);
1410 goto discard_it;
1411 }
1412 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1413 inet_csk_reqsk_queue_drop_and_put(sk, req);
1414 goto lookup;
1415 }
1416 sock_hold(sk);
1417 refcounted = true;
1418 nsk = tcp_check_req(sk, skb, req, false);
1419 if (!nsk) {
1420 reqsk_put(req);
1421 goto discard_and_relse;
1422 }
1423 if (nsk == sk) {
1424 reqsk_put(req);
1425 tcp_v6_restore_cb(skb);
1426 } else if (tcp_child_process(sk, nsk, skb)) {
1427 tcp_v6_send_reset(nsk, skb);
1428 goto discard_and_relse;
1429 } else {
1430 sock_put(sk);
1431 return 0;
1432 }
1433 }
1434 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1435 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1436 goto discard_and_relse;
1437 }
1438
1439 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1440 goto discard_and_relse;
1441
1442 tcp_v6_fill_cb(skb, hdr, th);
1443
1444 if (tcp_v6_inbound_md5_hash(sk, skb))
1445 goto discard_and_relse;
1446
1447 if (sk_filter(sk, skb))
1448 goto discard_and_relse;
1449
1450 skb->dev = NULL;
1451
1452 if (sk->sk_state == TCP_LISTEN) {
1453 ret = tcp_v6_do_rcv(sk, skb);
1454 goto put_and_return;
1455 }
1456
1457 sk_incoming_cpu_update(sk);
1458
1459 bh_lock_sock_nested(sk);
1460 tcp_segs_in(tcp_sk(sk), skb);
1461 ret = 0;
1462 if (!sock_owned_by_user(sk)) {
1463 if (!tcp_prequeue(sk, skb))
1464 ret = tcp_v6_do_rcv(sk, skb);
1465 } else if (unlikely(sk_add_backlog(sk, skb,
1466 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1467 bh_unlock_sock(sk);
1468 __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
1469 goto discard_and_relse;
1470 }
1471 bh_unlock_sock(sk);
1472
1473 put_and_return:
1474 if (refcounted)
1475 sock_put(sk);
1476 return ret ? -1 : 0;
1477
1478 no_tcp_socket:
1479 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1480 goto discard_it;
1481
1482 tcp_v6_fill_cb(skb, hdr, th);
1483
1484 if (tcp_checksum_complete(skb)) {
1485 csum_error:
1486 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1487 bad_packet:
1488 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1489 } else {
1490 tcp_v6_send_reset(NULL, skb);
1491 }
1492
1493 discard_it:
1494 kfree_skb(skb);
1495 return 0;
1496
1497 discard_and_relse:
1498 sk_drops_add(sk, skb);
1499 if (refcounted)
1500 sock_put(sk);
1501 goto discard_it;
1502
1503 do_time_wait:
1504 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1505 inet_twsk_put(inet_twsk(sk));
1506 goto discard_it;
1507 }
1508
1509 tcp_v6_fill_cb(skb, hdr, th);
1510
1511 if (tcp_checksum_complete(skb)) {
1512 inet_twsk_put(inet_twsk(sk));
1513 goto csum_error;
1514 }
1515
1516 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1517 case TCP_TW_SYN:
1518 {
1519 struct sock *sk2;
1520
1521 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1522 skb, __tcp_hdrlen(th),
1523 &ipv6_hdr(skb)->saddr, th->source,
1524 &ipv6_hdr(skb)->daddr,
1525 ntohs(th->dest), tcp_v6_iif(skb));
1526 if (sk2) {
1527 struct inet_timewait_sock *tw = inet_twsk(sk);
1528 inet_twsk_deschedule_put(tw);
1529 sk = sk2;
1530 tcp_v6_restore_cb(skb);
1531 refcounted = false;
1532 goto process;
1533 }
1534 /* Fall through to ACK */
1535 }
1536 case TCP_TW_ACK:
1537 tcp_v6_timewait_ack(sk, skb);
1538 break;
1539 case TCP_TW_RST:
1540 tcp_v6_restore_cb(skb);
1541 tcp_v6_send_reset(sk, skb);
1542 inet_twsk_deschedule_put(inet_twsk(sk));
1543 goto discard_it;
1544 case TCP_TW_SUCCESS:
1545 ;
1546 }
1547 goto discard_it;
1548 }
1549
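/* Early demux: for plain PACKET_HOST segments, find the established
 * socket before routing so its cached rx dst can be attached to the skb.
 */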
1550 static void tcp_v6_early_demux(struct sk_buff *skb)
1551 {
1552 const struct ipv6hdr *hdr;
1553 const struct tcphdr *th;
1554 struct sock *sk;
1555
1556 if (skb->pkt_type != PACKET_HOST)
1557 return;
1558
1559 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1560 return;
1561
1562 hdr = ipv6_hdr(skb);
1563 th = tcp_hdr(skb);
1564
1565 if (th->doff < sizeof(struct tcphdr) / 4)
1566 return;
1567
1568 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1569 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1570 &hdr->saddr, th->source,
1571 &hdr->daddr, ntohs(th->dest),
1572 inet6_iif(skb));
1573 if (sk) {
1574 skb->sk = sk;
1575 skb->destructor = sock_edemux;
1576 if (sk_fullsock(sk)) {
1577 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1578
1579 if (dst)
1580 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1581 if (dst &&
1582 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1583 skb_dst_set_noref(skb, dst);
1584 }
1585 }
1586 }
1587
1588 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1589 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1590 .twsk_unique = tcp_twsk_unique,
1591 .twsk_destructor = tcp_twsk_destructor,
1592 };
1593
1594 static const struct inet_connection_sock_af_ops ipv6_specific = {
1595 .queue_xmit = inet6_csk_xmit,
1596 .send_check = tcp_v6_send_check,
1597 .rebuild_header = inet6_sk_rebuild_header,
1598 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1599 .conn_request = tcp_v6_conn_request,
1600 .syn_recv_sock = tcp_v6_syn_recv_sock,
1601 .net_header_len = sizeof(struct ipv6hdr),
1602 .net_frag_header_len = sizeof(struct frag_hdr),
1603 .setsockopt = ipv6_setsockopt,
1604 .getsockopt = ipv6_getsockopt,
1605 .addr2sockaddr = inet6_csk_addr2sockaddr,
1606 .sockaddr_len = sizeof(struct sockaddr_in6),
1607 .bind_conflict = inet6_csk_bind_conflict,
1608 #ifdef CONFIG_COMPAT
1609 .compat_setsockopt = compat_ipv6_setsockopt,
1610 .compat_getsockopt = compat_ipv6_getsockopt,
1611 #endif
1612 .mtu_reduced = tcp_v6_mtu_reduced,
1613 };
1614
1615 #ifdef CONFIG_TCP_MD5SIG
1616 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1617 .md5_lookup = tcp_v6_md5_lookup,
1618 .calc_md5_hash = tcp_v6_md5_hash_skb,
1619 .md5_parse = tcp_v6_parse_md5_keys,
1620 };
1621 #endif
1622
1623 /*
1624 * TCP over IPv4 via INET6 API
1625 */
1626 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1627 .queue_xmit = ip_queue_xmit,
1628 .send_check = tcp_v4_send_check,
1629 .rebuild_header = inet_sk_rebuild_header,
1630 .sk_rx_dst_set = inet_sk_rx_dst_set,
1631 .conn_request = tcp_v6_conn_request,
1632 .syn_recv_sock = tcp_v6_syn_recv_sock,
1633 .net_header_len = sizeof(struct iphdr),
1634 .setsockopt = ipv6_setsockopt,
1635 .getsockopt = ipv6_getsockopt,
1636 .addr2sockaddr = inet6_csk_addr2sockaddr,
1637 .sockaddr_len = sizeof(struct sockaddr_in6),
1638 .bind_conflict = inet6_csk_bind_conflict,
1639 #ifdef CONFIG_COMPAT
1640 .compat_setsockopt = compat_ipv6_setsockopt,
1641 .compat_getsockopt = compat_ipv6_getsockopt,
1642 #endif
1643 .mtu_reduced = tcp_v4_mtu_reduced,
1644 };
1645
1646 #ifdef CONFIG_TCP_MD5SIG
1647 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1648 .md5_lookup = tcp_v4_md5_lookup,
1649 .calc_md5_hash = tcp_v4_md5_hash_skb,
1650 .md5_parse = tcp_v6_parse_md5_keys,
1651 };
1652 #endif
1653
1654 /* NOTE: A lot of things are set to zero explicitly by the call to
1655 * sk_alloc(), so they need not be done here.
1656 */
1657 static int tcp_v6_init_sock(struct sock *sk)
1658 {
1659 struct inet_connection_sock *icsk = inet_csk(sk);
1660
1661 tcp_init_sock(sk);
1662
1663 icsk->icsk_af_ops = &ipv6_specific;
1664
1665 #ifdef CONFIG_TCP_MD5SIG
1666 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1667 #endif
1668
1669 return 0;
1670 }
1671
1672 static void tcp_v6_destroy_sock(struct sock *sk)
1673 {
1674 tcp_v4_destroy_sock(sk);
1675 inet6_destroy_sock(sk);
1676 }
1677
1678 #ifdef CONFIG_PROC_FS
1679 /* Proc filesystem TCPv6 sock list dumping. */
1680 static void get_openreq6(struct seq_file *seq,
1681 const struct request_sock *req, int i)
1682 {
1683 long ttd = req->rsk_timer.expires - jiffies;
1684 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1685 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1686
1687 if (ttd < 0)
1688 ttd = 0;
1689
1690 seq_printf(seq,
1691 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1692 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1693 i,
1694 src->s6_addr32[0], src->s6_addr32[1],
1695 src->s6_addr32[2], src->s6_addr32[3],
1696 inet_rsk(req)->ir_num,
1697 dest->s6_addr32[0], dest->s6_addr32[1],
1698 dest->s6_addr32[2], dest->s6_addr32[3],
1699 ntohs(inet_rsk(req)->ir_rmt_port),
1700 TCP_SYN_RECV,
1701 0, 0, /* could print option size, but that is af dependent. */
1702 1, /* timers active (only the expire timer) */
1703 jiffies_to_clock_t(ttd),
1704 req->num_timeout,
1705 from_kuid_munged(seq_user_ns(seq),
1706 sock_i_uid(req->rsk_listener)),
1707 0, /* non standard timer */
1708 0, /* open_requests have no inode */
1709 0, req);
1710 }
1711
1712 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1713 {
1714 const struct in6_addr *dest, *src;
1715 __u16 destp, srcp;
1716 int timer_active;
1717 unsigned long timer_expires;
1718 const struct inet_sock *inet = inet_sk(sp);
1719 const struct tcp_sock *tp = tcp_sk(sp);
1720 const struct inet_connection_sock *icsk = inet_csk(sp);
1721 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1722 int rx_queue;
1723 int state;
1724
1725 dest = &sp->sk_v6_daddr;
1726 src = &sp->sk_v6_rcv_saddr;
1727 destp = ntohs(inet->inet_dport);
1728 srcp = ntohs(inet->inet_sport);
1729
1730 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1731 timer_active = 1;
1732 timer_expires = icsk->icsk_timeout;
1733 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1734 timer_active = 4;
1735 timer_expires = icsk->icsk_timeout;
1736 } else if (timer_pending(&sp->sk_timer)) {
1737 timer_active = 2;
1738 timer_expires = sp->sk_timer.expires;
1739 } else {
1740 timer_active = 0;
1741 timer_expires = jiffies;
1742 }
1743
1744 state = sk_state_load(sp);
1745 if (state == TCP_LISTEN)
1746 rx_queue = sp->sk_ack_backlog;
1747 else
1748 /* Because we don't lock the socket,
1749 * we might find a transient negative value.
1750 */
1751 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1752
1753 seq_printf(seq,
1754 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1755 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1756 i,
1757 src->s6_addr32[0], src->s6_addr32[1],
1758 src->s6_addr32[2], src->s6_addr32[3], srcp,
1759 dest->s6_addr32[0], dest->s6_addr32[1],
1760 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1761 state,
1762 tp->write_seq - tp->snd_una,
1763 rx_queue,
1764 timer_active,
1765 jiffies_delta_to_clock_t(timer_expires - jiffies),
1766 icsk->icsk_retransmits,
1767 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1768 icsk->icsk_probes_out,
1769 sock_i_ino(sp),
1770 atomic_read(&sp->sk_refcnt), sp,
1771 jiffies_to_clock_t(icsk->icsk_rto),
1772 jiffies_to_clock_t(icsk->icsk_ack.ato),
1773 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1774 tp->snd_cwnd,
1775 state == TCP_LISTEN ?
1776 fastopenq->max_qlen :
1777 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1778 );
1779 }
1780
1781 static void get_timewait6_sock(struct seq_file *seq,
1782 struct inet_timewait_sock *tw, int i)
1783 {
1784 long delta = tw->tw_timer.expires - jiffies;
1785 const struct in6_addr *dest, *src;
1786 __u16 destp, srcp;
1787
1788 dest = &tw->tw_v6_daddr;
1789 src = &tw->tw_v6_rcv_saddr;
1790 destp = ntohs(tw->tw_dport);
1791 srcp = ntohs(tw->tw_sport);
1792
1793 seq_printf(seq,
1794 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1795 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1796 i,
1797 src->s6_addr32[0], src->s6_addr32[1],
1798 src->s6_addr32[2], src->s6_addr32[3], srcp,
1799 dest->s6_addr32[0], dest->s6_addr32[1],
1800 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1801 tw->tw_substate, 0, 0,
1802 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1803 atomic_read(&tw->tw_refcnt), tw);
1804 }
1805
1806 static int tcp6_seq_show(struct seq_file *seq, void *v)
1807 {
1808 struct tcp_iter_state *st;
1809 struct sock *sk = v;
1810
1811 if (v == SEQ_START_TOKEN) {
1812 seq_puts(seq,
1813 " sl "
1814 "local_address "
1815 "remote_address "
1816 "st tx_queue rx_queue tr tm->when retrnsmt"
1817 " uid timeout inode\n");
1818 goto out;
1819 }
1820 st = seq->private;
1821
1822 if (sk->sk_state == TCP_TIME_WAIT)
1823 get_timewait6_sock(seq, v, st->num);
1824 else if (sk->sk_state == TCP_NEW_SYN_RECV)
1825 get_openreq6(seq, v, st->num);
1826 else
1827 get_tcp6_sock(seq, v, st->num);
1828 out:
1829 return 0;
1830 }
1831
1832 static const struct file_operations tcp6_afinfo_seq_fops = {
1833 .owner = THIS_MODULE,
1834 .open = tcp_seq_open,
1835 .read = seq_read,
1836 .llseek = seq_lseek,
1837 .release = seq_release_net
1838 };
1839
1840 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1841 .name = "tcp6",
1842 .family = AF_INET6,
1843 .seq_fops = &tcp6_afinfo_seq_fops,
1844 .seq_ops = {
1845 .show = tcp6_seq_show,
1846 },
1847 };
1848
1849 int __net_init tcp6_proc_init(struct net *net)
1850 {
1851 return tcp_proc_register(net, &tcp6_seq_afinfo);
1852 }
1853
1854 void tcp6_proc_exit(struct net *net)
1855 {
1856 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1857 }
1858 #endif
1859
1860 static void tcp_v6_clear_sk(struct sock *sk, int size)
1861 {
1862 struct inet_sock *inet = inet_sk(sk);
1863
1864 /* we do not want to clear the pinet6 field, because of RCU lookups */
1865 sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1866
1867 size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1868 memset(&inet->pinet6 + 1, 0, size);
1869 }
1870
1871 struct proto tcpv6_prot = {
1872 .name = "TCPv6",
1873 .owner = THIS_MODULE,
1874 .close = tcp_close,
1875 .connect = tcp_v6_connect,
1876 .disconnect = tcp_disconnect,
1877 .accept = inet_csk_accept,
1878 .ioctl = tcp_ioctl,
1879 .init = tcp_v6_init_sock,
1880 .destroy = tcp_v6_destroy_sock,
1881 .shutdown = tcp_shutdown,
1882 .setsockopt = tcp_setsockopt,
1883 .getsockopt = tcp_getsockopt,
1884 .recvmsg = tcp_recvmsg,
1885 .sendmsg = tcp_sendmsg,
1886 .sendpage = tcp_sendpage,
1887 .backlog_rcv = tcp_v6_do_rcv,
1888 .release_cb = tcp_release_cb,
1889 .hash = inet6_hash,
1890 .unhash = inet_unhash,
1891 .get_port = inet_csk_get_port,
1892 .enter_memory_pressure = tcp_enter_memory_pressure,
1893 .stream_memory_free = tcp_stream_memory_free,
1894 .sockets_allocated = &tcp_sockets_allocated,
1895 .memory_allocated = &tcp_memory_allocated,
1896 .memory_pressure = &tcp_memory_pressure,
1897 .orphan_count = &tcp_orphan_count,
1898 .sysctl_mem = sysctl_tcp_mem,
1899 .sysctl_wmem = sysctl_tcp_wmem,
1900 .sysctl_rmem = sysctl_tcp_rmem,
1901 .max_header = MAX_TCP_HEADER,
1902 .obj_size = sizeof(struct tcp6_sock),
1903 .slab_flags = SLAB_DESTROY_BY_RCU,
1904 .twsk_prot = &tcp6_timewait_sock_ops,
1905 .rsk_prot = &tcp6_request_sock_ops,
1906 .h.hashinfo = &tcp_hashinfo,
1907 .no_autobind = true,
1908 #ifdef CONFIG_COMPAT
1909 .compat_setsockopt = compat_tcp_setsockopt,
1910 .compat_getsockopt = compat_tcp_getsockopt,
1911 #endif
1912 .clear_sk = tcp_v6_clear_sk,
1913 .diag_destroy = tcp_abort,
1914 };
1915
1916 static const struct inet6_protocol tcpv6_protocol = {
1917 .early_demux = tcp_v6_early_demux,
1918 .handler = tcp_v6_rcv,
1919 .err_handler = tcp_v6_err,
1920 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1921 };
1922
1923 static struct inet_protosw tcpv6_protosw = {
1924 .type = SOCK_STREAM,
1925 .protocol = IPPROTO_TCP,
1926 .prot = &tcpv6_prot,
1927 .ops = &inet6_stream_ops,
1928 .flags = INET_PROTOSW_PERMANENT |
1929 INET_PROTOSW_ICSK,
1930 };
1931
1932 static int __net_init tcpv6_net_init(struct net *net)
1933 {
1934 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1935 SOCK_RAW, IPPROTO_TCP, net);
1936 }
1937
1938 static void __net_exit tcpv6_net_exit(struct net *net)
1939 {
1940 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1941 }
1942
1943 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1944 {
1945 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1946 }
1947
1948 static struct pernet_operations tcpv6_net_ops = {
1949 .init = tcpv6_net_init,
1950 .exit = tcpv6_net_exit,
1951 .exit_batch = tcpv6_net_exit_batch,
1952 };
1953
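/* Register the IPPROTO_TCP handler, the SOCK_STREAM protosw and the
 * per-netns control sockets, unwinding in reverse order on failure.
 */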
1954 int __init tcpv6_init(void)
1955 {
1956 int ret;
1957
1958 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1959 if (ret)
1960 goto out;
1961
1962 /* register inet6 protocol */
1963 ret = inet6_register_protosw(&tcpv6_protosw);
1964 if (ret)
1965 goto out_tcpv6_protocol;
1966
1967 ret = register_pernet_subsys(&tcpv6_net_ops);
1968 if (ret)
1969 goto out_tcpv6_protosw;
1970 out:
1971 return ret;
1972
1973 out_tcpv6_protosw:
1974 inet6_unregister_protosw(&tcpv6_protosw);
1975 out_tcpv6_protocol:
1976 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1977 goto out;
1978 }
1979
1980 void tcpv6_exit(void)
1981 {
1982 unregister_pernet_subsys(&tcpv6_net_ops);
1983 inet6_unregister_protosw(&tcpv6_protosw);
1984 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1985 }