1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on:
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and Alexey Kuznetsov:
16 * Support the IPV6_V6ONLY socket option, which allows
17 * both IPv4 and IPv6 sockets to bind a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/tcp_memcontrol.h>
65 #include <net/busy_poll.h>
66
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72
73 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
75 struct request_sock *req);
76
77 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78
79 static const struct inet_connection_sock_af_ops ipv6_mapped;
80 static const struct inet_connection_sock_af_ops ipv6_specific;
81 #ifdef CONFIG_TCP_MD5SIG
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
84 #else
85 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
86 const struct in6_addr *addr)
87 {
88 return NULL;
89 }
90 #endif
91
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93 {
94 struct dst_entry *dst = skb_dst(skb);
95
96 if (dst) {
97 const struct rt6_info *rt = (const struct rt6_info *)dst;
98
99 dst_hold(dst);
100 sk->sk_rx_dst = dst;
101 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
102 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
103 }
104 }
105
106 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
107 {
108 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
109 ipv6_hdr(skb)->saddr.s6_addr32,
110 tcp_hdr(skb)->dest,
111 tcp_hdr(skb)->source);
112 }
113
114 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
115 int addr_len)
116 {
117 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
118 struct inet_sock *inet = inet_sk(sk);
119 struct inet_connection_sock *icsk = inet_csk(sk);
120 struct ipv6_pinfo *np = inet6_sk(sk);
121 struct tcp_sock *tp = tcp_sk(sk);
122 struct in6_addr *saddr = NULL, *final_p, final;
123 struct flowi6 fl6;
124 struct dst_entry *dst;
125 int addr_type;
126 int err;
127
128 if (addr_len < SIN6_LEN_RFC2133)
129 return -EINVAL;
130
131 if (usin->sin6_family != AF_INET6)
132 return -EAFNOSUPPORT;
133
134 memset(&fl6, 0, sizeof(fl6));
135
136 if (np->sndflow) {
137 fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
138 IP6_ECN_flow_init(fl6.flowlabel);
139 if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
140 struct ip6_flowlabel *flowlabel;
141 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
142 if (!flowlabel)
143 return -EINVAL;
144 fl6_sock_release(flowlabel);
145 }
146 }
147
148 /*
149 * connect() to INADDR_ANY means loopback (BSD'ism).
150 */
151
152 if (ipv6_addr_any(&usin->sin6_addr))
153 usin->sin6_addr.s6_addr[15] = 0x1;
154
155 addr_type = ipv6_addr_type(&usin->sin6_addr);
156
157 if (addr_type & IPV6_ADDR_MULTICAST)
158 return -ENETUNREACH;
159
160 if (addr_type & IPV6_ADDR_LINKLOCAL) {
161 if (addr_len >= sizeof(struct sockaddr_in6) &&
162 usin->sin6_scope_id) {
163 /* If interface is set while binding, indices
164 * must coincide.
165 */
166 if (sk->sk_bound_dev_if &&
167 sk->sk_bound_dev_if != usin->sin6_scope_id)
168 return -EINVAL;
169
170 sk->sk_bound_dev_if = usin->sin6_scope_id;
171 }
172
173 /* Connect to link-local address requires an interface */
174 if (!sk->sk_bound_dev_if)
175 return -EINVAL;
176 }
177
178 if (tp->rx_opt.ts_recent_stamp &&
179 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
180 tp->rx_opt.ts_recent = 0;
181 tp->rx_opt.ts_recent_stamp = 0;
182 tp->write_seq = 0;
183 }
184
185 sk->sk_v6_daddr = usin->sin6_addr;
186 np->flow_label = fl6.flowlabel;
187
188 /*
189 * TCP over IPv4
190 */
191
192 if (addr_type == IPV6_ADDR_MAPPED) {
193 u32 exthdrlen = icsk->icsk_ext_hdr_len;
194 struct sockaddr_in sin;
195
196 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
197
198 if (__ipv6_only_sock(sk))
199 return -ENETUNREACH;
200
201 sin.sin_family = AF_INET;
202 sin.sin_port = usin->sin6_port;
203 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
204
205 icsk->icsk_af_ops = &ipv6_mapped;
206 sk->sk_backlog_rcv = tcp_v4_do_rcv;
207 #ifdef CONFIG_TCP_MD5SIG
208 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
209 #endif
210
211 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
212
213 if (err) {
214 icsk->icsk_ext_hdr_len = exthdrlen;
215 icsk->icsk_af_ops = &ipv6_specific;
216 sk->sk_backlog_rcv = tcp_v6_do_rcv;
217 #ifdef CONFIG_TCP_MD5SIG
218 tp->af_specific = &tcp_sock_ipv6_specific;
219 #endif
220 goto failure;
221 }
222 np->saddr = sk->sk_v6_rcv_saddr;
223
224 return err;
225 }
226
227 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
228 saddr = &sk->sk_v6_rcv_saddr;
229
230 fl6.flowi6_proto = IPPROTO_TCP;
231 fl6.daddr = sk->sk_v6_daddr;
232 fl6.saddr = saddr ? *saddr : np->saddr;
233 fl6.flowi6_oif = sk->sk_bound_dev_if;
234 fl6.flowi6_mark = sk->sk_mark;
235 fl6.fl6_dport = usin->sin6_port;
236 fl6.fl6_sport = inet->inet_sport;
237
238 final_p = fl6_update_dst(&fl6, np->opt, &final);
239
240 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
241
242 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
243 if (IS_ERR(dst)) {
244 err = PTR_ERR(dst);
245 goto failure;
246 }
247
248 if (!saddr) {
249 saddr = &fl6.saddr;
250 sk->sk_v6_rcv_saddr = *saddr;
251 }
252
253 /* set the source address */
254 np->saddr = *saddr;
255 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
256
257 sk->sk_gso_type = SKB_GSO_TCPV6;
258 __ip6_dst_store(sk, dst, NULL, NULL);
259
260 if (tcp_death_row.sysctl_tw_recycle &&
261 !tp->rx_opt.ts_recent_stamp &&
262 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
263 tcp_fetch_timewait_stamp(sk, dst);
264
265 icsk->icsk_ext_hdr_len = 0;
266 if (np->opt)
267 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
268 np->opt->opt_nflen);
269
270 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
271
272 inet->inet_dport = usin->sin6_port;
273
274 tcp_set_state(sk, TCP_SYN_SENT);
275 err = inet6_hash_connect(&tcp_death_row, sk);
276 if (err)
277 goto late_failure;
278
279 sk_set_txhash(sk);
280
281 if (!tp->write_seq && likely(!tp->repair))
282 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
283 sk->sk_v6_daddr.s6_addr32,
284 inet->inet_sport,
285 inet->inet_dport);
286
287 err = tcp_connect(sk);
288 if (err)
289 goto late_failure;
290
291 return 0;
292
293 late_failure:
294 tcp_set_state(sk, TCP_CLOSE);
295 __sk_dst_reset(sk);
296 failure:
297 inet->inet_dport = 0;
298 sk->sk_route_caps = 0;
299 return err;
300 }
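
/* Illustration (not part of this file): a minimal userspace sketch of the
 * v4-mapped branch above, with an example address and port and no error
 * handling. Connecting an AF_INET6 TCP socket to ::ffff:a.b.c.d makes
 * tcp_v6_connect() switch the socket to the ipv6_mapped ops and hand off
 * to tcp_v4_connect():
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
 *				   .sin6_port = htons(80) };
 *
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sa.sin6_addr);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * If IPV6_V6ONLY is set on the socket, this fails with ENETUNREACH,
 * matching the __ipv6_only_sock() check above.
 */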
301
302 static void tcp_v6_mtu_reduced(struct sock *sk)
303 {
304 struct dst_entry *dst;
305
306 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
307 return;
308
309 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
310 if (!dst)
311 return;
312
313 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
314 tcp_sync_mss(sk, dst_mtu(dst));
315 tcp_simple_retransmit(sk);
316 }
317 }
318
319 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
320 u8 type, u8 code, int offset, __be32 info)
321 {
322 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
323 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
324 struct net *net = dev_net(skb->dev);
325 struct request_sock *fastopen;
326 struct ipv6_pinfo *np;
327 struct tcp_sock *tp;
328 __u32 seq, snd_una;
329 struct sock *sk;
330 int err;
331
332 sk = __inet6_lookup_established(net, &tcp_hashinfo,
333 &hdr->daddr, th->dest,
334 &hdr->saddr, ntohs(th->source),
335 skb->dev->ifindex);
336
337 if (!sk) {
338 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
339 ICMP6_MIB_INERRORS);
340 return;
341 }
342
343 if (sk->sk_state == TCP_TIME_WAIT) {
344 inet_twsk_put(inet_twsk(sk));
345 return;
346 }
347 seq = ntohl(th->seq);
348 if (sk->sk_state == TCP_NEW_SYN_RECV)
349 return tcp_req_err(sk, seq);
350
351 bh_lock_sock(sk);
352 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
353 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
354
355 if (sk->sk_state == TCP_CLOSE)
356 goto out;
357
358 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
359 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
360 goto out;
361 }
362
363 tp = tcp_sk(sk);
364 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
365 fastopen = tp->fastopen_rsk;
366 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
367 if (sk->sk_state != TCP_LISTEN &&
368 !between(seq, snd_una, tp->snd_nxt)) {
369 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
370 goto out;
371 }
372
373 np = inet6_sk(sk);
374
375 if (type == NDISC_REDIRECT) {
376 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
377
378 if (dst)
379 dst->ops->redirect(dst, sk, skb);
380 goto out;
381 }
382
383 if (type == ICMPV6_PKT_TOOBIG) {
384 /* We are not interested in TCP_LISTEN and open_requests
385 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
386 * they should go through unfragmented).
387 */
388 if (sk->sk_state == TCP_LISTEN)
389 goto out;
390
391 if (!ip6_sk_accept_pmtu(sk))
392 goto out;
393
394 tp->mtu_info = ntohl(info);
395 if (!sock_owned_by_user(sk))
396 tcp_v6_mtu_reduced(sk);
397 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
398 &tp->tsq_flags))
399 sock_hold(sk);
400 goto out;
401 }
402
403 icmpv6_err_convert(type, code, &err);
404
405 /* Might be for a request_sock */
406 switch (sk->sk_state) {
407 case TCP_SYN_SENT:
408 case TCP_SYN_RECV:
409 /* Only in fast or simultaneous open. If a fast open socket is
410 * already accepted, it is treated as a connected one below.
411 */
412 if (fastopen && !fastopen->sk)
413 break;
414
415 if (!sock_owned_by_user(sk)) {
416 sk->sk_err = err;
417 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
418
419 tcp_done(sk);
420 } else
421 sk->sk_err_soft = err;
422 goto out;
423 }
424
425 if (!sock_owned_by_user(sk) && np->recverr) {
426 sk->sk_err = err;
427 sk->sk_error_report(sk);
428 } else
429 sk->sk_err_soft = err;
430
431 out:
432 bh_unlock_sock(sk);
433 sock_put(sk);
434 }
435
436
437 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
438 struct flowi *fl,
439 struct request_sock *req,
440 u16 queue_mapping,
441 struct tcp_fastopen_cookie *foc)
442 {
443 struct inet_request_sock *ireq = inet_rsk(req);
444 struct ipv6_pinfo *np = inet6_sk(sk);
445 struct flowi6 *fl6 = &fl->u.ip6;
446 struct sk_buff *skb;
447 int err = -ENOMEM;
448
449 /* First, grab a route. */
450 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
451 goto done;
452
453 skb = tcp_make_synack(sk, dst, req, foc);
454
455 if (skb) {
456 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
457 &ireq->ir_v6_rmt_addr);
458
459 fl6->daddr = ireq->ir_v6_rmt_addr;
460 if (np->repflow && ireq->pktopts)
461 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
462
463 skb_set_queue_mapping(skb, queue_mapping);
464 err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
465 err = net_xmit_eval(err);
466 }
467
468 done:
469 return err;
470 }
471
472
473 static void tcp_v6_reqsk_destructor(struct request_sock *req)
474 {
475 kfree_skb(inet_rsk(req)->pktopts);
476 }
477
478 #ifdef CONFIG_TCP_MD5SIG
479 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
480 const struct in6_addr *addr)
481 {
482 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
483 }
484
485 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
486 const struct sock *addr_sk)
487 {
488 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
489 }
490
491 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
492 int optlen)
493 {
494 struct tcp_md5sig cmd;
495 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
496
497 if (optlen < sizeof(cmd))
498 return -EINVAL;
499
500 if (copy_from_user(&cmd, optval, sizeof(cmd)))
501 return -EFAULT;
502
503 if (sin6->sin6_family != AF_INET6)
504 return -EINVAL;
505
506 if (!cmd.tcpm_keylen) {
507 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
508 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
509 AF_INET);
510 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
511 AF_INET6);
512 }
513
514 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
515 return -EINVAL;
516
517 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
518 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
519 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
520
521 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
522 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
523 }
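
/* Illustration (not part of this file): a userspace sketch of installing a
 * TCP MD5 (RFC 2385) key that tcp_v6_parse_md5_keys() above consumes; the
 * peer address and key are examples only and error handling is omitted.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A tcpm_keylen of zero deletes the key for that peer, and a v4-mapped
 * sin6_addr is stored as an AF_INET key, as handled above.
 */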
524
525 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
526 const struct in6_addr *daddr,
527 const struct in6_addr *saddr, int nbytes)
528 {
529 struct tcp6_pseudohdr *bp;
530 struct scatterlist sg;
531
532 bp = &hp->md5_blk.ip6;
533 /* 1. TCP pseudo-header (RFC2460) */
534 bp->saddr = *saddr;
535 bp->daddr = *daddr;
536 bp->protocol = cpu_to_be32(IPPROTO_TCP);
537 bp->len = cpu_to_be32(nbytes);
538
539 sg_init_one(&sg, bp, sizeof(*bp));
540 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
541 }
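
/* For reference, the pseudo-header hashed above follows the RFC 2460
 * checksum layout that RFC 2385 reuses for IPv6 (struct tcp6_pseudohdr):
 * source address (16 bytes), destination address (16 bytes), upper-layer
 * length (4 bytes), then a zero-padded next-header word holding
 * IPPROTO_TCP (4 bytes).
 */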
542
543 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
544 const struct in6_addr *daddr, struct in6_addr *saddr,
545 const struct tcphdr *th)
546 {
547 struct tcp_md5sig_pool *hp;
548 struct hash_desc *desc;
549
550 hp = tcp_get_md5sig_pool();
551 if (!hp)
552 goto clear_hash_noput;
553 desc = &hp->md5_desc;
554
555 if (crypto_hash_init(desc))
556 goto clear_hash;
557 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
558 goto clear_hash;
559 if (tcp_md5_hash_header(hp, th))
560 goto clear_hash;
561 if (tcp_md5_hash_key(hp, key))
562 goto clear_hash;
563 if (crypto_hash_final(desc, md5_hash))
564 goto clear_hash;
565
566 tcp_put_md5sig_pool();
567 return 0;
568
569 clear_hash:
570 tcp_put_md5sig_pool();
571 clear_hash_noput:
572 memset(md5_hash, 0, 16);
573 return 1;
574 }
575
576 static int tcp_v6_md5_hash_skb(char *md5_hash,
577 const struct tcp_md5sig_key *key,
578 const struct sock *sk,
579 const struct sk_buff *skb)
580 {
581 const struct in6_addr *saddr, *daddr;
582 struct tcp_md5sig_pool *hp;
583 struct hash_desc *desc;
584 const struct tcphdr *th = tcp_hdr(skb);
585
586 if (sk) { /* valid for establish/request sockets */
587 saddr = &sk->sk_v6_rcv_saddr;
588 daddr = &sk->sk_v6_daddr;
589 } else {
590 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
591 saddr = &ip6h->saddr;
592 daddr = &ip6h->daddr;
593 }
594
595 hp = tcp_get_md5sig_pool();
596 if (!hp)
597 goto clear_hash_noput;
598 desc = &hp->md5_desc;
599
600 if (crypto_hash_init(desc))
601 goto clear_hash;
602
603 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
604 goto clear_hash;
605 if (tcp_md5_hash_header(hp, th))
606 goto clear_hash;
607 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
608 goto clear_hash;
609 if (tcp_md5_hash_key(hp, key))
610 goto clear_hash;
611 if (crypto_hash_final(desc, md5_hash))
612 goto clear_hash;
613
614 tcp_put_md5sig_pool();
615 return 0;
616
617 clear_hash:
618 tcp_put_md5sig_pool();
619 clear_hash_noput:
620 memset(md5_hash, 0, 16);
621 return 1;
622 }
623
624 static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
625 {
626 const __u8 *hash_location = NULL;
627 struct tcp_md5sig_key *hash_expected;
628 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
629 const struct tcphdr *th = tcp_hdr(skb);
630 int genhash;
631 u8 newhash[16];
632
633 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
634 hash_location = tcp_parse_md5sig_option(th);
635
636 /* We've parsed the options - do we have a hash? */
637 if (!hash_expected && !hash_location)
638 return false;
639
640 if (hash_expected && !hash_location) {
641 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
642 return true;
643 }
644
645 if (!hash_expected && hash_location) {
646 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
647 return true;
648 }
649
650 /* check the signature */
651 genhash = tcp_v6_md5_hash_skb(newhash,
652 hash_expected,
653 NULL, skb);
654
655 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
656 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
657 genhash ? "failed" : "mismatch",
658 &ip6h->saddr, ntohs(th->source),
659 &ip6h->daddr, ntohs(th->dest));
660 return true;
661 }
662 return false;
663 }
664 #endif
665
666 static void tcp_v6_init_req(struct request_sock *req,
667 const struct sock *sk_listener,
668 struct sk_buff *skb)
669 {
670 struct inet_request_sock *ireq = inet_rsk(req);
671 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
672
673 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
674 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
675
676 /* So that link locals have meaning */
677 if (!sk_listener->sk_bound_dev_if &&
678 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
679 ireq->ir_iif = tcp_v6_iif(skb);
680
681 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
682 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
683 np->rxopt.bits.rxinfo ||
684 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
685 np->rxopt.bits.rxohlim || np->repflow)) {
686 atomic_inc(&skb->users);
687 ireq->pktopts = skb;
688 }
689 }
690
691 static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
692 const struct request_sock *req,
693 bool *strict)
694 {
695 if (strict)
696 *strict = true;
697 return inet6_csk_route_req(sk, &fl->u.ip6, req);
698 }
699
700 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
701 .family = AF_INET6,
702 .obj_size = sizeof(struct tcp6_request_sock),
703 .rtx_syn_ack = tcp_rtx_synack,
704 .send_ack = tcp_v6_reqsk_send_ack,
705 .destructor = tcp_v6_reqsk_destructor,
706 .send_reset = tcp_v6_send_reset,
707 .syn_ack_timeout = tcp_syn_ack_timeout,
708 };
709
710 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
711 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
712 sizeof(struct ipv6hdr),
713 #ifdef CONFIG_TCP_MD5SIG
714 .req_md5_lookup = tcp_v6_md5_lookup,
715 .calc_md5_hash = tcp_v6_md5_hash_skb,
716 #endif
717 .init_req = tcp_v6_init_req,
718 #ifdef CONFIG_SYN_COOKIES
719 .cookie_init_seq = cookie_v6_init_sequence,
720 #endif
721 .route_req = tcp_v6_route_req,
722 .init_seq = tcp_v6_init_sequence,
723 .send_synack = tcp_v6_send_synack,
724 .queue_hash_add = inet6_csk_reqsk_queue_hash_add,
725 };
726
727 static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
728 u32 ack, u32 win, u32 tsval, u32 tsecr,
729 int oif, struct tcp_md5sig_key *key, int rst,
730 u8 tclass, u32 label)
731 {
732 const struct tcphdr *th = tcp_hdr(skb);
733 struct tcphdr *t1;
734 struct sk_buff *buff;
735 struct flowi6 fl6;
736 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
737 struct sock *ctl_sk = net->ipv6.tcp_sk;
738 unsigned int tot_len = sizeof(struct tcphdr);
739 struct dst_entry *dst;
740 __be32 *topt;
741
742 if (tsecr)
743 tot_len += TCPOLEN_TSTAMP_ALIGNED;
744 #ifdef CONFIG_TCP_MD5SIG
745 if (key)
746 tot_len += TCPOLEN_MD5SIG_ALIGNED;
747 #endif
748
749 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
750 GFP_ATOMIC);
751 if (!buff)
752 return;
753
754 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
755
756 t1 = (struct tcphdr *) skb_push(buff, tot_len);
757 skb_reset_transport_header(buff);
758
759 /* Swap the send and the receive. */
760 memset(t1, 0, sizeof(*t1));
761 t1->dest = th->source;
762 t1->source = th->dest;
763 t1->doff = tot_len / 4;
764 t1->seq = htonl(seq);
765 t1->ack_seq = htonl(ack);
766 t1->ack = !rst || !th->ack;
767 t1->rst = rst;
768 t1->window = htons(win);
769
770 topt = (__be32 *)(t1 + 1);
771
772 if (tsecr) {
773 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
774 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
775 *topt++ = htonl(tsval);
776 *topt++ = htonl(tsecr);
777 }
778
779 #ifdef CONFIG_TCP_MD5SIG
780 if (key) {
781 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
782 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
783 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
784 &ipv6_hdr(skb)->saddr,
785 &ipv6_hdr(skb)->daddr, t1);
786 }
787 #endif
788
789 memset(&fl6, 0, sizeof(fl6));
790 fl6.daddr = ipv6_hdr(skb)->saddr;
791 fl6.saddr = ipv6_hdr(skb)->daddr;
792 fl6.flowlabel = label;
793
794 buff->ip_summed = CHECKSUM_PARTIAL;
795 buff->csum = 0;
796
797 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
798
799 fl6.flowi6_proto = IPPROTO_TCP;
800 if (rt6_need_strict(&fl6.daddr) && !oif)
801 fl6.flowi6_oif = tcp_v6_iif(skb);
802 else
803 fl6.flowi6_oif = oif;
804 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
805 fl6.fl6_dport = t1->dest;
806 fl6.fl6_sport = t1->source;
807 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
808
809 /* Pass a socket to ip6_dst_lookup even when it is for an RST;
810 * the underlying function will use it to retrieve the network
811 * namespace.
812 */
813 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
814 if (!IS_ERR(dst)) {
815 skb_dst_set(buff, dst);
816 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
817 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
818 if (rst)
819 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
820 return;
821 }
822
823 kfree_skb(buff);
824 }
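
/* For reference, each 32-bit aligned option word written above looks like:
 *
 *	NOP | NOP | TCPOPT_TIMESTAMP | 10, then TSval and TSecr
 *	NOP | NOP | TCPOPT_MD5SIG | 18, then the 16-byte digest
 *
 * which is why tot_len grows by TCPOLEN_TSTAMP_ALIGNED (12) and
 * TCPOLEN_MD5SIG_ALIGNED (20) bytes respectively.
 */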
825
826 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
827 {
828 const struct tcphdr *th = tcp_hdr(skb);
829 u32 seq = 0, ack_seq = 0;
830 struct tcp_md5sig_key *key = NULL;
831 #ifdef CONFIG_TCP_MD5SIG
832 const __u8 *hash_location = NULL;
833 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
834 unsigned char newhash[16];
835 int genhash;
836 struct sock *sk1 = NULL;
837 #endif
838 int oif;
839
840 if (th->rst)
841 return;
842
843 /* If sk is not NULL, it means we did a successful lookup and the incoming
844 * route had to be correct. prequeue might have dropped our dst.
845 */
846 if (!sk && !ipv6_unicast_destination(skb))
847 return;
848
849 #ifdef CONFIG_TCP_MD5SIG
850 hash_location = tcp_parse_md5sig_option(th);
851 if (!sk && hash_location) {
852 /*
853 * The active side is gone. Try to find the listening socket through
854 * the source port, and then find the md5 key through that socket.
855 * We do not lose any security here:
856 * the incoming packet is checked against the found md5 key, and
857 * no RST is generated if the md5 hash doesn't match.
858 */
859 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
860 &tcp_hashinfo, &ipv6h->saddr,
861 th->source, &ipv6h->daddr,
862 ntohs(th->source), tcp_v6_iif(skb));
863 if (!sk1)
864 return;
865
866 rcu_read_lock();
867 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
868 if (!key)
869 goto release_sk1;
870
871 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
872 if (genhash || memcmp(hash_location, newhash, 16) != 0)
873 goto release_sk1;
874 } else {
875 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
876 }
877 #endif
878
879 if (th->ack)
880 seq = ntohl(th->ack_seq);
881 else
882 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
883 (th->doff << 2);
884
885 oif = sk ? sk->sk_bound_dev_if : 0;
886 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
887
888 #ifdef CONFIG_TCP_MD5SIG
889 release_sk1:
890 if (sk1) {
891 rcu_read_unlock();
892 sock_put(sk1);
893 }
894 #endif
895 }
896
897 static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
898 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
899 struct tcp_md5sig_key *key, u8 tclass,
900 u32 label)
901 {
902 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
903 tclass, label);
904 }
905
906 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
907 {
908 struct inet_timewait_sock *tw = inet_twsk(sk);
909 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
910
911 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
912 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
913 tcp_time_stamp + tcptw->tw_ts_offset,
914 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
915 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
916
917 inet_twsk_put(tw);
918 }
919
920 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
921 struct request_sock *req)
922 {
923 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
924 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
925 */
926 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
927 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
928 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
929 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
930 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
931 0, 0);
932 }
933
934
935 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
936 {
937 const struct tcphdr *th = tcp_hdr(skb);
938 struct request_sock *req;
939 struct sock *nsk;
940
941 /* Find possible connection requests. */
942 req = inet6_csk_search_req(sk, th->source,
943 &ipv6_hdr(skb)->saddr,
944 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
945 if (req) {
946 nsk = tcp_check_req(sk, skb, req, false);
947 if (!nsk || nsk == sk)
948 reqsk_put(req);
949 return nsk;
950 }
951 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
952 &ipv6_hdr(skb)->saddr, th->source,
953 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
954 tcp_v6_iif(skb));
955
956 if (nsk) {
957 if (nsk->sk_state != TCP_TIME_WAIT) {
958 bh_lock_sock(nsk);
959 return nsk;
960 }
961 inet_twsk_put(inet_twsk(nsk));
962 return NULL;
963 }
964
965 #ifdef CONFIG_SYN_COOKIES
966 if (!th->syn)
967 sk = cookie_v6_check(sk, skb);
968 #endif
969 return sk;
970 }
971
972 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
973 {
974 if (skb->protocol == htons(ETH_P_IP))
975 return tcp_v4_conn_request(sk, skb);
976
977 if (!ipv6_unicast_destination(skb))
978 goto drop;
979
980 return tcp_conn_request(&tcp6_request_sock_ops,
981 &tcp_request_sock_ipv6_ops, sk, skb);
982
983 drop:
984 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
985 return 0; /* don't send reset */
986 }
987
988 static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
989 struct request_sock *req,
990 struct dst_entry *dst)
991 {
992 struct inet_request_sock *ireq;
993 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
994 struct tcp6_sock *newtcp6sk;
995 struct inet_sock *newinet;
996 struct tcp_sock *newtp;
997 struct sock *newsk;
998 #ifdef CONFIG_TCP_MD5SIG
999 struct tcp_md5sig_key *key;
1000 #endif
1001 struct flowi6 fl6;
1002
1003 if (skb->protocol == htons(ETH_P_IP)) {
1004 /*
1005 * v6 mapped
1006 */
1007
1008 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1009
1010 if (!newsk)
1011 return NULL;
1012
1013 newtcp6sk = (struct tcp6_sock *)newsk;
1014 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1015
1016 newinet = inet_sk(newsk);
1017 newnp = inet6_sk(newsk);
1018 newtp = tcp_sk(newsk);
1019
1020 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1021
1022 newnp->saddr = newsk->sk_v6_rcv_saddr;
1023
1024 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1025 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1026 #ifdef CONFIG_TCP_MD5SIG
1027 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1028 #endif
1029
1030 newnp->ipv6_ac_list = NULL;
1031 newnp->ipv6_fl_list = NULL;
1032 newnp->pktoptions = NULL;
1033 newnp->opt = NULL;
1034 newnp->mcast_oif = tcp_v6_iif(skb);
1035 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1036 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1037 if (np->repflow)
1038 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1039
1040 /*
1041 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1042 * here, tcp_create_openreq_child now does this for us, see the comment in
1043 * that function for the gory details. -acme
1044 */
1045
1046 /* This is a tricky place. Until this moment the IPv4 tcp code
1047 worked with the IPv6 icsk.icsk_af_ops.
1048 Sync it now.
1049 */
1050 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1051
1052 return newsk;
1053 }
1054
1055 ireq = inet_rsk(req);
1056
1057 if (sk_acceptq_is_full(sk))
1058 goto out_overflow;
1059
1060 if (!dst) {
1061 dst = inet6_csk_route_req(sk, &fl6, req);
1062 if (!dst)
1063 goto out;
1064 }
1065
1066 newsk = tcp_create_openreq_child(sk, req, skb);
1067 if (!newsk)
1068 goto out_nonewsk;
1069
1070 /*
1071 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1072 * count here, tcp_create_openreq_child now does this for us, see the
1073 * comment in that function for the gory details. -acme
1074 */
1075
1076 newsk->sk_gso_type = SKB_GSO_TCPV6;
1077 __ip6_dst_store(newsk, dst, NULL, NULL);
1078 inet6_sk_rx_dst_set(newsk, skb);
1079
1080 newtcp6sk = (struct tcp6_sock *)newsk;
1081 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1082
1083 newtp = tcp_sk(newsk);
1084 newinet = inet_sk(newsk);
1085 newnp = inet6_sk(newsk);
1086
1087 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1088
1089 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1090 newnp->saddr = ireq->ir_v6_loc_addr;
1091 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1092 newsk->sk_bound_dev_if = ireq->ir_iif;
1093
1094 /* Now IPv6 options...
1095
1096 First: no IPv4 options.
1097 */
1098 newinet->inet_opt = NULL;
1099 newnp->ipv6_ac_list = NULL;
1100 newnp->ipv6_fl_list = NULL;
1101
1102 /* Clone RX bits */
1103 newnp->rxopt.all = np->rxopt.all;
1104
1105 /* Clone pktoptions received with SYN */
1106 newnp->pktoptions = NULL;
1107 if (ireq->pktopts) {
1108 newnp->pktoptions = skb_clone(ireq->pktopts,
1109 sk_gfp_atomic(sk, GFP_ATOMIC));
1110 consume_skb(ireq->pktopts);
1111 ireq->pktopts = NULL;
1112 if (newnp->pktoptions)
1113 skb_set_owner_r(newnp->pktoptions, newsk);
1114 }
1115 newnp->opt = NULL;
1116 newnp->mcast_oif = tcp_v6_iif(skb);
1117 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1118 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1119 if (np->repflow)
1120 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1121
1122 /* Clone native IPv6 options from the listening socket (if any)
1123
1124 Yes, keeping a reference count would be much more clever,
1125 but we do one more thing here: reattach optmem
1126 to newsk.
1127 */
1128 if (np->opt)
1129 newnp->opt = ipv6_dup_options(newsk, np->opt);
1130
1131 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1132 if (newnp->opt)
1133 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1134 newnp->opt->opt_flen);
1135
1136 tcp_ca_openreq_child(newsk, dst);
1137
1138 tcp_sync_mss(newsk, dst_mtu(dst));
1139 newtp->advmss = dst_metric_advmss(dst);
1140 if (tcp_sk(sk)->rx_opt.user_mss &&
1141 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1142 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1143
1144 tcp_initialize_rcv_mss(newsk);
1145
1146 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1147 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1148
1149 #ifdef CONFIG_TCP_MD5SIG
1150 /* Copy over the MD5 key from the original socket */
1151 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1152 if (key) {
1153 /* We're using one, so create a matching key
1154 * on the newsk structure. If we fail to get
1155 * memory, then we end up not copying the key
1156 * across. Shucks.
1157 */
1158 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1159 AF_INET6, key->key, key->keylen,
1160 sk_gfp_atomic(sk, GFP_ATOMIC));
1161 }
1162 #endif
1163
1164 if (__inet_inherit_port(sk, newsk) < 0) {
1165 inet_csk_prepare_forced_close(newsk);
1166 tcp_done(newsk);
1167 goto out;
1168 }
1169 __inet_hash(newsk, NULL);
1170
1171 return newsk;
1172
1173 out_overflow:
1174 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1175 out_nonewsk:
1176 dst_release(dst);
1177 out:
1178 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1179 return NULL;
1180 }
1181
1182 /* The socket must have its spinlock held when we get
1183 * here.
1184 *
1185 * We have a potential double-lock case here, so even when
1186 * doing backlog processing we use the BH locking scheme.
1187 * This is because we cannot sleep with the original spinlock
1188 * held.
1189 */
1190 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1191 {
1192 struct ipv6_pinfo *np = inet6_sk(sk);
1193 struct tcp_sock *tp;
1194 struct sk_buff *opt_skb = NULL;
1195
1196 /* Imagine: a socket is IPv6. An IPv4 packet arrives,
1197 goes to the IPv4 receive handler and is backlogged.
1198 From the backlog it always goes here. Kerboom...
1199 Fortunately, tcp_rcv_established and rcv_established
1200 handle them correctly, but it is not the case with
1201 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1202 */
1203
1204 if (skb->protocol == htons(ETH_P_IP))
1205 return tcp_v4_do_rcv(sk, skb);
1206
1207 if (sk_filter(sk, skb))
1208 goto discard;
1209
1210 /*
1211 * socket locking is here for SMP purposes as backlog rcv
1212 * is currently called with bh processing disabled.
1213 */
1214
1215 /* Do Stevens' IPV6_PKTOPTIONS.
1216
1217 Yes, guys, it is the only place in our code where we
1218 can do this without affecting IPv4.
1219 The rest of the code is protocol independent,
1220 and I do not like the idea of uglifying IPv4.
1221
1222 Actually, the whole idea behind IPV6_PKTOPTIONS
1223 looks not very well thought out. For now we latch the
1224 options received in the last packet enqueued
1225 by tcp. Feel free to propose a better solution.
1226 --ANK (980728)
1227 */
1228 if (np->rxopt.all)
1229 opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1230
1231 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1232 struct dst_entry *dst = sk->sk_rx_dst;
1233
1234 sock_rps_save_rxhash(sk, skb);
1235 sk_mark_napi_id(sk, skb);
1236 if (dst) {
1237 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1238 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1239 dst_release(dst);
1240 sk->sk_rx_dst = NULL;
1241 }
1242 }
1243
1244 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1245 if (opt_skb)
1246 goto ipv6_pktoptions;
1247 return 0;
1248 }
1249
1250 if (tcp_checksum_complete(skb))
1251 goto csum_err;
1252
1253 if (sk->sk_state == TCP_LISTEN) {
1254 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1255 if (!nsk)
1256 goto discard;
1257
1258 /*
1259 * Queue it on the new socket if the new socket is active,
1260 * otherwise we just short-circuit this and continue with
1261 * the new socket.
1262 */
1263 if (nsk != sk) {
1264 sock_rps_save_rxhash(nsk, skb);
1265 sk_mark_napi_id(nsk, skb);
1266 if (tcp_child_process(sk, nsk, skb))
1267 goto reset;
1268 if (opt_skb)
1269 __kfree_skb(opt_skb);
1270 return 0;
1271 }
1272 } else
1273 sock_rps_save_rxhash(sk, skb);
1274
1275 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1276 goto reset;
1277 if (opt_skb)
1278 goto ipv6_pktoptions;
1279 return 0;
1280
1281 reset:
1282 tcp_v6_send_reset(sk, skb);
1283 discard:
1284 if (opt_skb)
1285 __kfree_skb(opt_skb);
1286 kfree_skb(skb);
1287 return 0;
1288 csum_err:
1289 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1290 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1291 goto discard;
1292
1293
1294 ipv6_pktoptions:
1295 /* Do you ask, what is it?
1296
1297 1. skb was enqueued by tcp.
1298 2. skb is added to tail of read queue, rather than out of order.
1299 3. socket is not in passive state.
1300 4. Finally, it really contains options, which user wants to receive.
1301 */
1302 tp = tcp_sk(sk);
1303 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1304 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1305 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1306 np->mcast_oif = tcp_v6_iif(opt_skb);
1307 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1308 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1309 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1310 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1311 if (np->repflow)
1312 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1313 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1314 skb_set_owner_r(opt_skb, sk);
1315 opt_skb = xchg(&np->pktoptions, opt_skb);
1316 } else {
1317 __kfree_skb(opt_skb);
1318 opt_skb = xchg(&np->pktoptions, NULL);
1319 }
1320 }
1321
1322 kfree_skb(opt_skb);
1323 return 0;
1324 }
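
/* Illustration (not part of this file): the ipv6_pktoptions path above
 * latches the options carried by the most recent in-order segment. A
 * userspace sketch using the historical RFC 2292 interface (an assumption
 * about the usual option set; error handling omitted):
 *
 *	int on = 1;
 *	char buf[256];
 *	struct msghdr msg = { .msg_control = buf,
 *			      .msg_controllen = sizeof(buf) };
 *	socklen_t len = sizeof(msg);
 *
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on));
 *	// ... exchange data, then read back the latched ancillary data:
 *	getsockopt(fd, IPPROTO_IPV6, IPV6_2292PKTOPTIONS, &msg, &len);
 */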
1325
1326 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1327 const struct tcphdr *th)
1328 {
1329 /* This is tricky: we move IP6CB at its correct location into
1330 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1331 * _decode_session6() uses IP6CB().
1332 * barrier() makes sure compiler won't play aliasing games.
1333 */
1334 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1335 sizeof(struct inet6_skb_parm));
1336 barrier();
1337
1338 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1339 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1340 skb->len - th->doff*4);
1341 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1342 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1343 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1344 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1345 TCP_SKB_CB(skb)->sacked = 0;
1346 }
1347
1348 static void tcp_v6_restore_cb(struct sk_buff *skb)
1349 {
1350 /* We need to move header back to the beginning if xfrm6_policy_check()
1351 * and tcp_v6_fill_cb() are going to be called again.
1352 */
1353 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1354 sizeof(struct inet6_skb_parm));
1355 }
1356
1357 static int tcp_v6_rcv(struct sk_buff *skb)
1358 {
1359 const struct tcphdr *th;
1360 const struct ipv6hdr *hdr;
1361 struct sock *sk;
1362 int ret;
1363 struct net *net = dev_net(skb->dev);
1364
1365 if (skb->pkt_type != PACKET_HOST)
1366 goto discard_it;
1367
1368 /*
1369 * Count it even if it's bad.
1370 */
1371 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1372
1373 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1374 goto discard_it;
1375
1376 th = tcp_hdr(skb);
1377
1378 if (th->doff < sizeof(struct tcphdr)/4)
1379 goto bad_packet;
1380 if (!pskb_may_pull(skb, th->doff*4))
1381 goto discard_it;
1382
1383 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1384 goto csum_error;
1385
1386 th = tcp_hdr(skb);
1387 hdr = ipv6_hdr(skb);
1388
1389 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
1390 inet6_iif(skb));
1391 if (!sk)
1392 goto no_tcp_socket;
1393
1394 process:
1395 if (sk->sk_state == TCP_TIME_WAIT)
1396 goto do_time_wait;
1397
1398 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1399 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1400 goto discard_and_relse;
1401 }
1402
1403 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1404 goto discard_and_relse;
1405
1406 tcp_v6_fill_cb(skb, hdr, th);
1407
1408 #ifdef CONFIG_TCP_MD5SIG
1409 if (tcp_v6_inbound_md5_hash(sk, skb))
1410 goto discard_and_relse;
1411 #endif
1412
1413 if (sk_filter(sk, skb))
1414 goto discard_and_relse;
1415
1416 sk_incoming_cpu_update(sk);
1417 skb->dev = NULL;
1418
1419 bh_lock_sock_nested(sk);
1420 tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1421 ret = 0;
1422 if (!sock_owned_by_user(sk)) {
1423 if (!tcp_prequeue(sk, skb))
1424 ret = tcp_v6_do_rcv(sk, skb);
1425 } else if (unlikely(sk_add_backlog(sk, skb,
1426 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1427 bh_unlock_sock(sk);
1428 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1429 goto discard_and_relse;
1430 }
1431 bh_unlock_sock(sk);
1432
1433 sock_put(sk);
1434 return ret ? -1 : 0;
1435
1436 no_tcp_socket:
1437 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1438 goto discard_it;
1439
1440 tcp_v6_fill_cb(skb, hdr, th);
1441
1442 if (tcp_checksum_complete(skb)) {
1443 csum_error:
1444 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1445 bad_packet:
1446 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1447 } else {
1448 tcp_v6_send_reset(NULL, skb);
1449 }
1450
1451 discard_it:
1452 kfree_skb(skb);
1453 return 0;
1454
1455 discard_and_relse:
1456 sock_put(sk);
1457 goto discard_it;
1458
1459 do_time_wait:
1460 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1461 inet_twsk_put(inet_twsk(sk));
1462 goto discard_it;
1463 }
1464
1465 tcp_v6_fill_cb(skb, hdr, th);
1466
1467 if (tcp_checksum_complete(skb)) {
1468 inet_twsk_put(inet_twsk(sk));
1469 goto csum_error;
1470 }
1471
1472 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1473 case TCP_TW_SYN:
1474 {
1475 struct sock *sk2;
1476
1477 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1478 &ipv6_hdr(skb)->saddr, th->source,
1479 &ipv6_hdr(skb)->daddr,
1480 ntohs(th->dest), tcp_v6_iif(skb));
1481 if (sk2) {
1482 struct inet_timewait_sock *tw = inet_twsk(sk);
1483 inet_twsk_deschedule_put(tw);
1484 sk = sk2;
1485 tcp_v6_restore_cb(skb);
1486 goto process;
1487 }
1488 /* Fall through to ACK */
1489 }
1490 case TCP_TW_ACK:
1491 tcp_v6_timewait_ack(sk, skb);
1492 break;
1493 case TCP_TW_RST:
1494 tcp_v6_restore_cb(skb);
1495 goto no_tcp_socket;
1496 case TCP_TW_SUCCESS:
1497 ;
1498 }
1499 goto discard_it;
1500 }
1501
1502 static void tcp_v6_early_demux(struct sk_buff *skb)
1503 {
1504 const struct ipv6hdr *hdr;
1505 const struct tcphdr *th;
1506 struct sock *sk;
1507
1508 if (skb->pkt_type != PACKET_HOST)
1509 return;
1510
1511 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1512 return;
1513
1514 hdr = ipv6_hdr(skb);
1515 th = tcp_hdr(skb);
1516
1517 if (th->doff < sizeof(struct tcphdr) / 4)
1518 return;
1519
1520 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1521 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1522 &hdr->saddr, th->source,
1523 &hdr->daddr, ntohs(th->dest),
1524 inet6_iif(skb));
1525 if (sk) {
1526 skb->sk = sk;
1527 skb->destructor = sock_edemux;
1528 if (sk_fullsock(sk)) {
1529 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1530
1531 if (dst)
1532 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1533 if (dst &&
1534 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1535 skb_dst_set_noref(skb, dst);
1536 }
1537 }
1538 }
1539
1540 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1541 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1542 .twsk_unique = tcp_twsk_unique,
1543 .twsk_destructor = tcp_twsk_destructor,
1544 };
1545
1546 static const struct inet_connection_sock_af_ops ipv6_specific = {
1547 .queue_xmit = inet6_csk_xmit,
1548 .send_check = tcp_v6_send_check,
1549 .rebuild_header = inet6_sk_rebuild_header,
1550 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1551 .conn_request = tcp_v6_conn_request,
1552 .syn_recv_sock = tcp_v6_syn_recv_sock,
1553 .net_header_len = sizeof(struct ipv6hdr),
1554 .net_frag_header_len = sizeof(struct frag_hdr),
1555 .setsockopt = ipv6_setsockopt,
1556 .getsockopt = ipv6_getsockopt,
1557 .addr2sockaddr = inet6_csk_addr2sockaddr,
1558 .sockaddr_len = sizeof(struct sockaddr_in6),
1559 .bind_conflict = inet6_csk_bind_conflict,
1560 #ifdef CONFIG_COMPAT
1561 .compat_setsockopt = compat_ipv6_setsockopt,
1562 .compat_getsockopt = compat_ipv6_getsockopt,
1563 #endif
1564 .mtu_reduced = tcp_v6_mtu_reduced,
1565 };
1566
1567 #ifdef CONFIG_TCP_MD5SIG
1568 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1569 .md5_lookup = tcp_v6_md5_lookup,
1570 .calc_md5_hash = tcp_v6_md5_hash_skb,
1571 .md5_parse = tcp_v6_parse_md5_keys,
1572 };
1573 #endif
1574
1575 /*
1576 * TCP over IPv4 via INET6 API
1577 */
1578 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1579 .queue_xmit = ip_queue_xmit,
1580 .send_check = tcp_v4_send_check,
1581 .rebuild_header = inet_sk_rebuild_header,
1582 .sk_rx_dst_set = inet_sk_rx_dst_set,
1583 .conn_request = tcp_v6_conn_request,
1584 .syn_recv_sock = tcp_v6_syn_recv_sock,
1585 .net_header_len = sizeof(struct iphdr),
1586 .setsockopt = ipv6_setsockopt,
1587 .getsockopt = ipv6_getsockopt,
1588 .addr2sockaddr = inet6_csk_addr2sockaddr,
1589 .sockaddr_len = sizeof(struct sockaddr_in6),
1590 .bind_conflict = inet6_csk_bind_conflict,
1591 #ifdef CONFIG_COMPAT
1592 .compat_setsockopt = compat_ipv6_setsockopt,
1593 .compat_getsockopt = compat_ipv6_getsockopt,
1594 #endif
1595 .mtu_reduced = tcp_v4_mtu_reduced,
1596 };
1597
1598 #ifdef CONFIG_TCP_MD5SIG
1599 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1600 .md5_lookup = tcp_v4_md5_lookup,
1601 .calc_md5_hash = tcp_v4_md5_hash_skb,
1602 .md5_parse = tcp_v6_parse_md5_keys,
1603 };
1604 #endif
1605
1606 /* NOTE: A lot of things are set to zero explicitly by the call to
1607 * sk_alloc(), so they need not be done here.
1608 */
1609 static int tcp_v6_init_sock(struct sock *sk)
1610 {
1611 struct inet_connection_sock *icsk = inet_csk(sk);
1612
1613 tcp_init_sock(sk);
1614
1615 icsk->icsk_af_ops = &ipv6_specific;
1616
1617 #ifdef CONFIG_TCP_MD5SIG
1618 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1619 #endif
1620
1621 return 0;
1622 }
1623
1624 static void tcp_v6_destroy_sock(struct sock *sk)
1625 {
1626 tcp_v4_destroy_sock(sk);
1627 inet6_destroy_sock(sk);
1628 }
1629
1630 #ifdef CONFIG_PROC_FS
1631 /* Proc filesystem TCPv6 sock list dumping. */
1632 static void get_openreq6(struct seq_file *seq,
1633 struct request_sock *req, int i, kuid_t uid)
1634 {
1635 long ttd = req->rsk_timer.expires - jiffies;
1636 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1637 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1638
1639 if (ttd < 0)
1640 ttd = 0;
1641
1642 seq_printf(seq,
1643 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1644 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1645 i,
1646 src->s6_addr32[0], src->s6_addr32[1],
1647 src->s6_addr32[2], src->s6_addr32[3],
1648 inet_rsk(req)->ir_num,
1649 dest->s6_addr32[0], dest->s6_addr32[1],
1650 dest->s6_addr32[2], dest->s6_addr32[3],
1651 ntohs(inet_rsk(req)->ir_rmt_port),
1652 TCP_SYN_RECV,
1653 0, 0, /* could print option size, but that is af dependent. */
1654 1, /* timers active (only the expire timer) */
1655 jiffies_to_clock_t(ttd),
1656 req->num_timeout,
1657 from_kuid_munged(seq_user_ns(seq), uid),
1658 0, /* non standard timer */
1659 0, /* open_requests have no inode */
1660 0, req);
1661 }
1662
1663 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1664 {
1665 const struct in6_addr *dest, *src;
1666 __u16 destp, srcp;
1667 int timer_active;
1668 unsigned long timer_expires;
1669 const struct inet_sock *inet = inet_sk(sp);
1670 const struct tcp_sock *tp = tcp_sk(sp);
1671 const struct inet_connection_sock *icsk = inet_csk(sp);
1672 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
1673
1674 dest = &sp->sk_v6_daddr;
1675 src = &sp->sk_v6_rcv_saddr;
1676 destp = ntohs(inet->inet_dport);
1677 srcp = ntohs(inet->inet_sport);
1678
1679 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1680 timer_active = 1;
1681 timer_expires = icsk->icsk_timeout;
1682 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1683 timer_active = 4;
1684 timer_expires = icsk->icsk_timeout;
1685 } else if (timer_pending(&sp->sk_timer)) {
1686 timer_active = 2;
1687 timer_expires = sp->sk_timer.expires;
1688 } else {
1689 timer_active = 0;
1690 timer_expires = jiffies;
1691 }
1692
1693 seq_printf(seq,
1694 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1695 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1696 i,
1697 src->s6_addr32[0], src->s6_addr32[1],
1698 src->s6_addr32[2], src->s6_addr32[3], srcp,
1699 dest->s6_addr32[0], dest->s6_addr32[1],
1700 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1701 sp->sk_state,
1702 tp->write_seq - tp->snd_una,
1703 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1704 timer_active,
1705 jiffies_delta_to_clock_t(timer_expires - jiffies),
1706 icsk->icsk_retransmits,
1707 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1708 icsk->icsk_probes_out,
1709 sock_i_ino(sp),
1710 atomic_read(&sp->sk_refcnt), sp,
1711 jiffies_to_clock_t(icsk->icsk_rto),
1712 jiffies_to_clock_t(icsk->icsk_ack.ato),
1713 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1714 tp->snd_cwnd,
1715 sp->sk_state == TCP_LISTEN ?
1716 (fastopenq ? fastopenq->max_qlen : 0) :
1717 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1718 );
1719 }
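
/* For reference, a line produced above in /proc/net/tcp6 looks roughly
 * like the following (values illustrative, wrapped for readability):
 *
 *   12: 00000000000000000000000001000000:0016
 *       00000000000000000000000000000000:0000
 *       0A 00000000:00000000 00:00000000 00000000  0 0 19050 1 ...
 *
 * i.e. hex address:port pairs, state (0A is TCP_LISTEN), tx/rx queues,
 * timer info, uid and inode, matching the header emitted by tcp6_seq_show().
 */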
1720
1721 static void get_timewait6_sock(struct seq_file *seq,
1722 struct inet_timewait_sock *tw, int i)
1723 {
1724 long delta = tw->tw_timer.expires - jiffies;
1725 const struct in6_addr *dest, *src;
1726 __u16 destp, srcp;
1727
1728 dest = &tw->tw_v6_daddr;
1729 src = &tw->tw_v6_rcv_saddr;
1730 destp = ntohs(tw->tw_dport);
1731 srcp = ntohs(tw->tw_sport);
1732
1733 seq_printf(seq,
1734 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1735 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1736 i,
1737 src->s6_addr32[0], src->s6_addr32[1],
1738 src->s6_addr32[2], src->s6_addr32[3], srcp,
1739 dest->s6_addr32[0], dest->s6_addr32[1],
1740 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1741 tw->tw_substate, 0, 0,
1742 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1743 atomic_read(&tw->tw_refcnt), tw);
1744 }
1745
1746 static int tcp6_seq_show(struct seq_file *seq, void *v)
1747 {
1748 struct tcp_iter_state *st;
1749 struct sock *sk = v;
1750
1751 if (v == SEQ_START_TOKEN) {
1752 seq_puts(seq,
1753 " sl "
1754 "local_address "
1755 "remote_address "
1756 "st tx_queue rx_queue tr tm->when retrnsmt"
1757 " uid timeout inode\n");
1758 goto out;
1759 }
1760 st = seq->private;
1761
1762 switch (st->state) {
1763 case TCP_SEQ_STATE_LISTENING:
1764 case TCP_SEQ_STATE_ESTABLISHED:
1765 if (sk->sk_state == TCP_TIME_WAIT)
1766 get_timewait6_sock(seq, v, st->num);
1767 else
1768 get_tcp6_sock(seq, v, st->num);
1769 break;
1770 case TCP_SEQ_STATE_OPENREQ:
1771 get_openreq6(seq, v, st->num, st->uid);
1772 break;
1773 }
1774 out:
1775 return 0;
1776 }
1777
1778 static const struct file_operations tcp6_afinfo_seq_fops = {
1779 .owner = THIS_MODULE,
1780 .open = tcp_seq_open,
1781 .read = seq_read,
1782 .llseek = seq_lseek,
1783 .release = seq_release_net
1784 };
1785
1786 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1787 .name = "tcp6",
1788 .family = AF_INET6,
1789 .seq_fops = &tcp6_afinfo_seq_fops,
1790 .seq_ops = {
1791 .show = tcp6_seq_show,
1792 },
1793 };
1794
1795 int __net_init tcp6_proc_init(struct net *net)
1796 {
1797 return tcp_proc_register(net, &tcp6_seq_afinfo);
1798 }
1799
1800 void tcp6_proc_exit(struct net *net)
1801 {
1802 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1803 }
1804 #endif
1805
1806 static void tcp_v6_clear_sk(struct sock *sk, int size)
1807 {
1808 struct inet_sock *inet = inet_sk(sk);
1809
1810 /* we do not want to clear pinet6 field, because of RCU lookups */
1811 sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1812
1813 size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1814 memset(&inet->pinet6 + 1, 0, size);
1815 }
1816
1817 struct proto tcpv6_prot = {
1818 .name = "TCPv6",
1819 .owner = THIS_MODULE,
1820 .close = tcp_close,
1821 .connect = tcp_v6_connect,
1822 .disconnect = tcp_disconnect,
1823 .accept = inet_csk_accept,
1824 .ioctl = tcp_ioctl,
1825 .init = tcp_v6_init_sock,
1826 .destroy = tcp_v6_destroy_sock,
1827 .shutdown = tcp_shutdown,
1828 .setsockopt = tcp_setsockopt,
1829 .getsockopt = tcp_getsockopt,
1830 .recvmsg = tcp_recvmsg,
1831 .sendmsg = tcp_sendmsg,
1832 .sendpage = tcp_sendpage,
1833 .backlog_rcv = tcp_v6_do_rcv,
1834 .release_cb = tcp_release_cb,
1835 .hash = inet_hash,
1836 .unhash = inet_unhash,
1837 .get_port = inet_csk_get_port,
1838 .enter_memory_pressure = tcp_enter_memory_pressure,
1839 .stream_memory_free = tcp_stream_memory_free,
1840 .sockets_allocated = &tcp_sockets_allocated,
1841 .memory_allocated = &tcp_memory_allocated,
1842 .memory_pressure = &tcp_memory_pressure,
1843 .orphan_count = &tcp_orphan_count,
1844 .sysctl_mem = sysctl_tcp_mem,
1845 .sysctl_wmem = sysctl_tcp_wmem,
1846 .sysctl_rmem = sysctl_tcp_rmem,
1847 .max_header = MAX_TCP_HEADER,
1848 .obj_size = sizeof(struct tcp6_sock),
1849 .slab_flags = SLAB_DESTROY_BY_RCU,
1850 .twsk_prot = &tcp6_timewait_sock_ops,
1851 .rsk_prot = &tcp6_request_sock_ops,
1852 .h.hashinfo = &tcp_hashinfo,
1853 .no_autobind = true,
1854 #ifdef CONFIG_COMPAT
1855 .compat_setsockopt = compat_tcp_setsockopt,
1856 .compat_getsockopt = compat_tcp_getsockopt,
1857 #endif
1858 #ifdef CONFIG_MEMCG_KMEM
1859 .proto_cgroup = tcp_proto_cgroup,
1860 #endif
1861 .clear_sk = tcp_v6_clear_sk,
1862 };
1863
1864 static const struct inet6_protocol tcpv6_protocol = {
1865 .early_demux = tcp_v6_early_demux,
1866 .handler = tcp_v6_rcv,
1867 .err_handler = tcp_v6_err,
1868 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1869 };
1870
1871 static struct inet_protosw tcpv6_protosw = {
1872 .type = SOCK_STREAM,
1873 .protocol = IPPROTO_TCP,
1874 .prot = &tcpv6_prot,
1875 .ops = &inet6_stream_ops,
1876 .flags = INET_PROTOSW_PERMANENT |
1877 INET_PROTOSW_ICSK,
1878 };
1879
1880 static int __net_init tcpv6_net_init(struct net *net)
1881 {
1882 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1883 SOCK_RAW, IPPROTO_TCP, net);
1884 }
1885
1886 static void __net_exit tcpv6_net_exit(struct net *net)
1887 {
1888 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1889 }
1890
1891 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1892 {
1893 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1894 }
1895
1896 static struct pernet_operations tcpv6_net_ops = {
1897 .init = tcpv6_net_init,
1898 .exit = tcpv6_net_exit,
1899 .exit_batch = tcpv6_net_exit_batch,
1900 };
1901
1902 int __init tcpv6_init(void)
1903 {
1904 int ret;
1905
1906 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1907 if (ret)
1908 goto out;
1909
1910 /* register inet6 protocol */
1911 ret = inet6_register_protosw(&tcpv6_protosw);
1912 if (ret)
1913 goto out_tcpv6_protocol;
1914
1915 ret = register_pernet_subsys(&tcpv6_net_ops);
1916 if (ret)
1917 goto out_tcpv6_protosw;
1918 out:
1919 return ret;
1920
1921 out_tcpv6_protosw:
1922 inet6_unregister_protosw(&tcpv6_protosw);
1923 out_tcpv6_protocol:
1924 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1925 goto out;
1926 }
1927
1928 void tcpv6_exit(void)
1929 {
1930 unregister_pernet_subsys(&tcpv6_net_ops);
1931 inet6_unregister_protosw(&tcpv6_protosw);
1932 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1933 }