net/ipv6/tcp_ipv6.c
1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on:
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/tcp_memcontrol.h>
65 #include <net/busy_poll.h>
66
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
69
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
72
73 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
75 struct request_sock *req);
76
77 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78
79 static const struct inet_connection_sock_af_ops ipv6_mapped;
80 static const struct inet_connection_sock_af_ops ipv6_specific;
81 #ifdef CONFIG_TCP_MD5SIG
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
84 #else
85 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
86 const struct in6_addr *addr)
87 {
88 return NULL;
89 }
90 #endif
91
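/* Cache the validated input route on the socket for the established-state
 * fast path: hold a reference on the skb's dst, remember the incoming
 * interface, and record the fib6 node serial number as a cookie so a later
 * routing change can be detected in tcp_v6_do_rcv().
 */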
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93 {
94 struct dst_entry *dst = skb_dst(skb);
95
96 if (dst) {
97 const struct rt6_info *rt = (const struct rt6_info *)dst;
98
99 dst_hold(dst);
100 sk->sk_rx_dst = dst;
101 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
102 if (rt->rt6i_node)
103 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
104 }
105 }
106
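/* Compute the initial sequence number for this connection from the 4-tuple
 * of the received segment, using the keyed hash in
 * secure_tcpv6_sequence_number().
 */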
107 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
108 {
109 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
110 ipv6_hdr(skb)->saddr.s6_addr32,
111 tcp_hdr(skb)->dest,
112 tcp_hdr(skb)->source);
113 }
114
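/* Active open: validate the destination, handle flow labels and v4-mapped
 * addresses (falling back to tcp_v4_connect() for the latter), route the
 * flow, pick a source address, bind a local port via inet6_hash_connect()
 * and finally send the SYN with tcp_connect().
 */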
115 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
116 int addr_len)
117 {
118 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
119 struct inet_sock *inet = inet_sk(sk);
120 struct inet_connection_sock *icsk = inet_csk(sk);
121 struct ipv6_pinfo *np = inet6_sk(sk);
122 struct tcp_sock *tp = tcp_sk(sk);
123 struct in6_addr *saddr = NULL, *final_p, final;
124 struct rt6_info *rt;
125 struct flowi6 fl6;
126 struct dst_entry *dst;
127 int addr_type;
128 int err;
129
130 if (addr_len < SIN6_LEN_RFC2133)
131 return -EINVAL;
132
133 if (usin->sin6_family != AF_INET6)
134 return -EAFNOSUPPORT;
135
136 memset(&fl6, 0, sizeof(fl6));
137
138 if (np->sndflow) {
139 fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
140 IP6_ECN_flow_init(fl6.flowlabel);
141 if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
142 struct ip6_flowlabel *flowlabel;
143 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
144 if (!flowlabel)
145 return -EINVAL;
146 fl6_sock_release(flowlabel);
147 }
148 }
149
150 /*
151 * connect() to INADDR_ANY means loopback (BSD'ism).
152 */
153
154 if (ipv6_addr_any(&usin->sin6_addr))
155 usin->sin6_addr.s6_addr[15] = 0x1;
156
157 addr_type = ipv6_addr_type(&usin->sin6_addr);
158
159 if (addr_type & IPV6_ADDR_MULTICAST)
160 return -ENETUNREACH;
161
162 if (addr_type & IPV6_ADDR_LINKLOCAL) {
163 if (addr_len >= sizeof(struct sockaddr_in6) &&
164 usin->sin6_scope_id) {
165 /* If interface is set while binding, indices
166 * must coincide.
167 */
168 if (sk->sk_bound_dev_if &&
169 sk->sk_bound_dev_if != usin->sin6_scope_id)
170 return -EINVAL;
171
172 sk->sk_bound_dev_if = usin->sin6_scope_id;
173 }
174
175 /* Connecting to a link-local address requires an interface. */
176 if (!sk->sk_bound_dev_if)
177 return -EINVAL;
178 }
179
180 if (tp->rx_opt.ts_recent_stamp &&
181 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
182 tp->rx_opt.ts_recent = 0;
183 tp->rx_opt.ts_recent_stamp = 0;
184 tp->write_seq = 0;
185 }
186
187 sk->sk_v6_daddr = usin->sin6_addr;
188 np->flow_label = fl6.flowlabel;
189
190 /*
191 * TCP over IPv4
192 */
193
194 if (addr_type == IPV6_ADDR_MAPPED) {
195 u32 exthdrlen = icsk->icsk_ext_hdr_len;
196 struct sockaddr_in sin;
197
198 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
199
200 if (__ipv6_only_sock(sk))
201 return -ENETUNREACH;
202
203 sin.sin_family = AF_INET;
204 sin.sin_port = usin->sin6_port;
205 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
206
207 icsk->icsk_af_ops = &ipv6_mapped;
208 sk->sk_backlog_rcv = tcp_v4_do_rcv;
209 #ifdef CONFIG_TCP_MD5SIG
210 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
211 #endif
212
213 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
214
215 if (err) {
216 icsk->icsk_ext_hdr_len = exthdrlen;
217 icsk->icsk_af_ops = &ipv6_specific;
218 sk->sk_backlog_rcv = tcp_v6_do_rcv;
219 #ifdef CONFIG_TCP_MD5SIG
220 tp->af_specific = &tcp_sock_ipv6_specific;
221 #endif
222 goto failure;
223 }
224 np->saddr = sk->sk_v6_rcv_saddr;
225
226 return err;
227 }
228
229 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
230 saddr = &sk->sk_v6_rcv_saddr;
231
232 fl6.flowi6_proto = IPPROTO_TCP;
233 fl6.daddr = sk->sk_v6_daddr;
234 fl6.saddr = saddr ? *saddr : np->saddr;
235 fl6.flowi6_oif = sk->sk_bound_dev_if;
236 fl6.flowi6_mark = sk->sk_mark;
237 fl6.fl6_dport = usin->sin6_port;
238 fl6.fl6_sport = inet->inet_sport;
239
240 final_p = fl6_update_dst(&fl6, np->opt, &final);
241
242 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
243
244 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
245 if (IS_ERR(dst)) {
246 err = PTR_ERR(dst);
247 goto failure;
248 }
249
250 if (!saddr) {
251 saddr = &fl6.saddr;
252 sk->sk_v6_rcv_saddr = *saddr;
253 }
254
255 /* set the source address */
256 np->saddr = *saddr;
257 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
258
259 sk->sk_gso_type = SKB_GSO_TCPV6;
260 __ip6_dst_store(sk, dst, NULL, NULL);
261
262 rt = (struct rt6_info *) dst;
263 if (tcp_death_row.sysctl_tw_recycle &&
264 !tp->rx_opt.ts_recent_stamp &&
265 ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
266 tcp_fetch_timewait_stamp(sk, dst);
267
268 icsk->icsk_ext_hdr_len = 0;
269 if (np->opt)
270 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
271 np->opt->opt_nflen);
272
273 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
274
275 inet->inet_dport = usin->sin6_port;
276
277 tcp_set_state(sk, TCP_SYN_SENT);
278 err = inet6_hash_connect(&tcp_death_row, sk);
279 if (err)
280 goto late_failure;
281
282 ip6_set_txhash(sk);
283
284 if (!tp->write_seq && likely(!tp->repair))
285 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
286 sk->sk_v6_daddr.s6_addr32,
287 inet->inet_sport,
288 inet->inet_dport);
289
290 err = tcp_connect(sk);
291 if (err)
292 goto late_failure;
293
294 return 0;
295
296 late_failure:
297 tcp_set_state(sk, TCP_CLOSE);
298 __sk_dst_reset(sk);
299 failure:
300 inet->inet_dport = 0;
301 sk->sk_route_caps = 0;
302 return err;
303 }
304
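/* Apply a deferred ICMPv6 Packet Too Big notification: take the new path
 * MTU stored in tp->mtu_info and, if it is smaller than the current pmtu
 * cookie, shrink the MSS and retransmit the data that no longer fits.
 */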
305 static void tcp_v6_mtu_reduced(struct sock *sk)
306 {
307 struct dst_entry *dst;
308
309 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
310 return;
311
312 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
313 if (!dst)
314 return;
315
316 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
317 tcp_sync_mss(sk, dst_mtu(dst));
318 tcp_simple_retransmit(sk);
319 }
320 }
321
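/* ICMPv6 error handler for TCP: look up the socket the error refers to,
 * then handle redirects, Packet Too Big (deferring the MTU update when the
 * socket is owned by user context) and hard errors, which are either
 * reported immediately or stored in sk_err_soft.
 */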
322 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
323 u8 type, u8 code, int offset, __be32 info)
324 {
325 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
326 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
327 struct net *net = dev_net(skb->dev);
328 struct request_sock *fastopen;
329 struct ipv6_pinfo *np;
330 struct tcp_sock *tp;
331 __u32 seq, snd_una;
332 struct sock *sk;
333 int err;
334
335 sk = __inet6_lookup_established(net, &tcp_hashinfo,
336 &hdr->daddr, th->dest,
337 &hdr->saddr, ntohs(th->source),
338 skb->dev->ifindex);
339
340 if (!sk) {
341 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
342 ICMP6_MIB_INERRORS);
343 return;
344 }
345
346 if (sk->sk_state == TCP_TIME_WAIT) {
347 inet_twsk_put(inet_twsk(sk));
348 return;
349 }
350 seq = ntohl(th->seq);
351 if (sk->sk_state == TCP_NEW_SYN_RECV)
352 return tcp_req_err(sk, seq);
353
354 bh_lock_sock(sk);
355 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
356 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
357
358 if (sk->sk_state == TCP_CLOSE)
359 goto out;
360
361 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
362 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
363 goto out;
364 }
365
366 tp = tcp_sk(sk);
367 /* XXX (TFO) - tp->snd_una should be the ISN (see tcp_create_openreq_child()) */
368 fastopen = tp->fastopen_rsk;
369 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
370 if (sk->sk_state != TCP_LISTEN &&
371 !between(seq, snd_una, tp->snd_nxt)) {
372 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
373 goto out;
374 }
375
376 np = inet6_sk(sk);
377
378 if (type == NDISC_REDIRECT) {
379 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
380
381 if (dst)
382 dst->ops->redirect(dst, sk, skb);
383 goto out;
384 }
385
386 if (type == ICMPV6_PKT_TOOBIG) {
387 /* We are not interested in TCP_LISTEN and open_requests
388 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
389 * they should go through unfragmented).
390 */
391 if (sk->sk_state == TCP_LISTEN)
392 goto out;
393
394 if (!ip6_sk_accept_pmtu(sk))
395 goto out;
396
397 tp->mtu_info = ntohl(info);
398 if (!sock_owned_by_user(sk))
399 tcp_v6_mtu_reduced(sk);
400 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
401 &tp->tsq_flags))
402 sock_hold(sk);
403 goto out;
404 }
405
406 icmpv6_err_convert(type, code, &err);
407
408 /* Might be for a request_sock */
409 switch (sk->sk_state) {
410 case TCP_SYN_SENT:
411 case TCP_SYN_RECV:
412 /* Only in fast or simultaneous open. If a fast open socket
413 * is already accepted it is treated as a connected one below.
414 */
415 if (fastopen && !fastopen->sk)
416 break;
417
418 if (!sock_owned_by_user(sk)) {
419 sk->sk_err = err;
420 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
421
422 tcp_done(sk);
423 } else
424 sk->sk_err_soft = err;
425 goto out;
426 }
427
428 if (!sock_owned_by_user(sk) && np->recverr) {
429 sk->sk_err = err;
430 sk->sk_error_report(sk);
431 } else
432 sk->sk_err_soft = err;
433
434 out:
435 bh_unlock_sock(sk);
436 sock_put(sk);
437 }
438
439
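/* Build and transmit a SYN-ACK for the given request socket: route the
 * reply if no destination was supplied, construct the segment with
 * tcp_make_synack(), checksum it and send it with ip6_xmit().
 */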
440 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
441 struct flowi *fl,
442 struct request_sock *req,
443 u16 queue_mapping,
444 struct tcp_fastopen_cookie *foc)
445 {
446 struct inet_request_sock *ireq = inet_rsk(req);
447 struct ipv6_pinfo *np = inet6_sk(sk);
448 struct flowi6 *fl6 = &fl->u.ip6;
449 struct sk_buff *skb;
450 int err = -ENOMEM;
451
452 /* First, grab a route. */
453 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
454 goto done;
455
456 skb = tcp_make_synack(sk, dst, req, foc);
457
458 if (skb) {
459 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
460 &ireq->ir_v6_rmt_addr);
461
462 fl6->daddr = ireq->ir_v6_rmt_addr;
463 if (np->repflow && ireq->pktopts)
464 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
465
466 skb_set_queue_mapping(skb, queue_mapping);
467 err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
468 err = net_xmit_eval(err);
469 }
470
471 done:
472 return err;
473 }
474
475
476 static void tcp_v6_reqsk_destructor(struct request_sock *req)
477 {
478 kfree_skb(inet_rsk(req)->pktopts);
479 }
480
481 #ifdef CONFIG_TCP_MD5SIG
482 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
483 const struct in6_addr *addr)
484 {
485 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
486 }
487
488 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
489 const struct sock *addr_sk)
490 {
491 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
492 }
493
494 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
495 int optlen)
496 {
497 struct tcp_md5sig cmd;
498 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
499
500 if (optlen < sizeof(cmd))
501 return -EINVAL;
502
503 if (copy_from_user(&cmd, optval, sizeof(cmd)))
504 return -EFAULT;
505
506 if (sin6->sin6_family != AF_INET6)
507 return -EINVAL;
508
509 if (!cmd.tcpm_keylen) {
510 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
511 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
512 AF_INET);
513 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
514 AF_INET6);
515 }
516
517 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
518 return -EINVAL;
519
520 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
521 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
522 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
523
524 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
525 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
526 }
527
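/* Feed the IPv6 pseudo-header (source and destination addresses, segment
 * length and protocol, as used for the TCP checksum) into the MD5 hash
 * state borrowed from the per-cpu tcp_md5sig_pool.
 */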
528 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
529 const struct in6_addr *daddr,
530 const struct in6_addr *saddr, int nbytes)
531 {
532 struct tcp6_pseudohdr *bp;
533 struct scatterlist sg;
534
535 bp = &hp->md5_blk.ip6;
536 /* 1. TCP pseudo-header (RFC2460) */
537 bp->saddr = *saddr;
538 bp->daddr = *daddr;
539 bp->protocol = cpu_to_be32(IPPROTO_TCP);
540 bp->len = cpu_to_be32(nbytes);
541
542 sg_init_one(&sg, bp, sizeof(*bp));
543 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
544 }
545
546 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
547 const struct in6_addr *daddr, struct in6_addr *saddr,
548 const struct tcphdr *th)
549 {
550 struct tcp_md5sig_pool *hp;
551 struct hash_desc *desc;
552
553 hp = tcp_get_md5sig_pool();
554 if (!hp)
555 goto clear_hash_noput;
556 desc = &hp->md5_desc;
557
558 if (crypto_hash_init(desc))
559 goto clear_hash;
560 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
561 goto clear_hash;
562 if (tcp_md5_hash_header(hp, th))
563 goto clear_hash;
564 if (tcp_md5_hash_key(hp, key))
565 goto clear_hash;
566 if (crypto_hash_final(desc, md5_hash))
567 goto clear_hash;
568
569 tcp_put_md5sig_pool();
570 return 0;
571
572 clear_hash:
573 tcp_put_md5sig_pool();
574 clear_hash_noput:
575 memset(md5_hash, 0, 16);
576 return 1;
577 }
578
579 static int tcp_v6_md5_hash_skb(char *md5_hash,
580 const struct tcp_md5sig_key *key,
581 const struct sock *sk,
582 const struct sk_buff *skb)
583 {
584 const struct in6_addr *saddr, *daddr;
585 struct tcp_md5sig_pool *hp;
586 struct hash_desc *desc;
587 const struct tcphdr *th = tcp_hdr(skb);
588
589 if (sk) { /* valid for establish/request sockets */
590 saddr = &sk->sk_v6_rcv_saddr;
591 daddr = &sk->sk_v6_daddr;
592 } else {
593 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
594 saddr = &ip6h->saddr;
595 daddr = &ip6h->daddr;
596 }
597
598 hp = tcp_get_md5sig_pool();
599 if (!hp)
600 goto clear_hash_noput;
601 desc = &hp->md5_desc;
602
603 if (crypto_hash_init(desc))
604 goto clear_hash;
605
606 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
607 goto clear_hash;
608 if (tcp_md5_hash_header(hp, th))
609 goto clear_hash;
610 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
611 goto clear_hash;
612 if (tcp_md5_hash_key(hp, key))
613 goto clear_hash;
614 if (crypto_hash_final(desc, md5_hash))
615 goto clear_hash;
616
617 tcp_put_md5sig_pool();
618 return 0;
619
620 clear_hash:
621 tcp_put_md5sig_pool();
622 clear_hash_noput:
623 memset(md5_hash, 0, 16);
624 return 1;
625 }
626
627 static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
628 {
629 const __u8 *hash_location = NULL;
630 struct tcp_md5sig_key *hash_expected;
631 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
632 const struct tcphdr *th = tcp_hdr(skb);
633 int genhash;
634 u8 newhash[16];
635
636 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
637 hash_location = tcp_parse_md5sig_option(th);
638
639 /* We've parsed the options - do we have a hash? */
640 if (!hash_expected && !hash_location)
641 return false;
642
643 if (hash_expected && !hash_location) {
644 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
645 return true;
646 }
647
648 if (!hash_expected && hash_location) {
649 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
650 return true;
651 }
652
653 /* check the signature */
654 genhash = tcp_v6_md5_hash_skb(newhash,
655 hash_expected,
656 NULL, skb);
657
658 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
659 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
660 genhash ? "failed" : "mismatch",
661 &ip6h->saddr, ntohs(th->source),
662 &ip6h->daddr, ntohs(th->dest));
663 return true;
664 }
665 return false;
666 }
667 #endif
668
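/* Fill the IPv6-specific parts of a freshly allocated request sock from the
 * incoming SYN: remote and local addresses, the incoming interface for
 * link-local peers, and a reference to the skb (pktopts) when the listener
 * has asked for packet options to be passed on to the child socket.
 */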
669 static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
670 struct sk_buff *skb)
671 {
672 struct inet_request_sock *ireq = inet_rsk(req);
673 struct ipv6_pinfo *np = inet6_sk(sk);
674
675 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
676 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
677
678 /* So that link locals have meaning */
679 if (!sk->sk_bound_dev_if &&
680 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
681 ireq->ir_iif = tcp_v6_iif(skb);
682
683 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
684 (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
685 np->rxopt.bits.rxinfo ||
686 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
687 np->rxopt.bits.rxohlim || np->repflow)) {
688 atomic_inc(&skb->users);
689 ireq->pktopts = skb;
690 }
691 }
692
693 static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
694 const struct request_sock *req,
695 bool *strict)
696 {
697 if (strict)
698 *strict = true;
699 return inet6_csk_route_req(sk, &fl->u.ip6, req);
700 }
701
702 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
703 .family = AF_INET6,
704 .obj_size = sizeof(struct tcp6_request_sock),
705 .rtx_syn_ack = tcp_rtx_synack,
706 .send_ack = tcp_v6_reqsk_send_ack,
707 .destructor = tcp_v6_reqsk_destructor,
708 .send_reset = tcp_v6_send_reset,
709 .syn_ack_timeout = tcp_syn_ack_timeout,
710 };
711
712 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
713 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
714 sizeof(struct ipv6hdr),
715 #ifdef CONFIG_TCP_MD5SIG
716 .req_md5_lookup = tcp_v6_md5_lookup,
717 .calc_md5_hash = tcp_v6_md5_hash_skb,
718 #endif
719 .init_req = tcp_v6_init_req,
720 #ifdef CONFIG_SYN_COOKIES
721 .cookie_init_seq = cookie_v6_init_sequence,
722 #endif
723 .route_req = tcp_v6_route_req,
724 .init_seq = tcp_v6_init_sequence,
725 .send_synack = tcp_v6_send_synack,
726 .queue_hash_add = inet6_csk_reqsk_queue_hash_add,
727 };
728
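/* Build a bare ACK or RST segment in reply to the given skb and send it on
 * the per-namespace control socket, optionally carrying timestamp and
 * TCP-MD5 options. Used for resets, time-wait ACKs and request-sock ACKs,
 * so it must not rely on full socket state.
 */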
729 static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
730 u32 ack, u32 win, u32 tsval, u32 tsecr,
731 int oif, struct tcp_md5sig_key *key, int rst,
732 u8 tclass, u32 label)
733 {
734 const struct tcphdr *th = tcp_hdr(skb);
735 struct tcphdr *t1;
736 struct sk_buff *buff;
737 struct flowi6 fl6;
738 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
739 struct sock *ctl_sk = net->ipv6.tcp_sk;
740 unsigned int tot_len = sizeof(struct tcphdr);
741 struct dst_entry *dst;
742 __be32 *topt;
743
744 if (tsecr)
745 tot_len += TCPOLEN_TSTAMP_ALIGNED;
746 #ifdef CONFIG_TCP_MD5SIG
747 if (key)
748 tot_len += TCPOLEN_MD5SIG_ALIGNED;
749 #endif
750
751 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
752 GFP_ATOMIC);
753 if (!buff)
754 return;
755
756 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
757
758 t1 = (struct tcphdr *) skb_push(buff, tot_len);
759 skb_reset_transport_header(buff);
760
761 /* Build the reply header with source and destination swapped. */
762 memset(t1, 0, sizeof(*t1));
763 t1->dest = th->source;
764 t1->source = th->dest;
765 t1->doff = tot_len / 4;
766 t1->seq = htonl(seq);
767 t1->ack_seq = htonl(ack);
768 t1->ack = !rst || !th->ack;
769 t1->rst = rst;
770 t1->window = htons(win);
771
772 topt = (__be32 *)(t1 + 1);
773
774 if (tsecr) {
775 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
776 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
777 *topt++ = htonl(tsval);
778 *topt++ = htonl(tsecr);
779 }
780
781 #ifdef CONFIG_TCP_MD5SIG
782 if (key) {
783 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
784 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
785 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
786 &ipv6_hdr(skb)->saddr,
787 &ipv6_hdr(skb)->daddr, t1);
788 }
789 #endif
790
791 memset(&fl6, 0, sizeof(fl6));
792 fl6.daddr = ipv6_hdr(skb)->saddr;
793 fl6.saddr = ipv6_hdr(skb)->daddr;
794 fl6.flowlabel = label;
795
796 buff->ip_summed = CHECKSUM_PARTIAL;
797 buff->csum = 0;
798
799 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
800
801 fl6.flowi6_proto = IPPROTO_TCP;
802 if (rt6_need_strict(&fl6.daddr) && !oif)
803 fl6.flowi6_oif = tcp_v6_iif(skb);
804 else
805 fl6.flowi6_oif = oif;
806 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
807 fl6.fl6_dport = t1->dest;
808 fl6.fl6_sport = t1->source;
809 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
810
811 /* Pass a socket to ip6_dst_lookup_flow even when the reply is a RST;
812 * the underlying function uses it to retrieve the network
813 * namespace.
814 */
815 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
816 if (!IS_ERR(dst)) {
817 skb_dst_set(buff, dst);
818 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
819 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
820 if (rst)
821 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
822 return;
823 }
824
825 kfree_skb(buff);
826 }
827
828 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
829 {
830 const struct tcphdr *th = tcp_hdr(skb);
831 u32 seq = 0, ack_seq = 0;
832 struct tcp_md5sig_key *key = NULL;
833 #ifdef CONFIG_TCP_MD5SIG
834 const __u8 *hash_location = NULL;
835 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
836 unsigned char newhash[16];
837 int genhash;
838 struct sock *sk1 = NULL;
839 #endif
840 int oif;
841
842 if (th->rst)
843 return;
844
845 /* If sk is not NULL, it means we did a successful lookup and the incoming
846 * route had to be correct. prequeue might have dropped our dst.
847 */
848 if (!sk && !ipv6_unicast_destination(skb))
849 return;
850
851 #ifdef CONFIG_TCP_MD5SIG
852 hash_location = tcp_parse_md5sig_option(th);
853 if (!sk && hash_location) {
854 /*
855 * The active side is gone. Try to find the listening socket through
856 * the source port, and then find the md5 key through that socket.
857 * We do not lose any security here:
858 * the incoming packet is verified against the md5 hash of the key we find,
859 * and no RST is generated if the hash doesn't match.
860 */
861 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
862 &tcp_hashinfo, &ipv6h->saddr,
863 th->source, &ipv6h->daddr,
864 ntohs(th->source), tcp_v6_iif(skb));
865 if (!sk1)
866 return;
867
868 rcu_read_lock();
869 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
870 if (!key)
871 goto release_sk1;
872
873 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
874 if (genhash || memcmp(hash_location, newhash, 16) != 0)
875 goto release_sk1;
876 } else {
877 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
878 }
879 #endif
880
881 if (th->ack)
882 seq = ntohl(th->ack_seq);
883 else
884 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
885 (th->doff << 2);
886
887 oif = sk ? sk->sk_bound_dev_if : 0;
888 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
889
890 #ifdef CONFIG_TCP_MD5SIG
891 release_sk1:
892 if (sk1) {
893 rcu_read_unlock();
894 sock_put(sk1);
895 }
896 #endif
897 }
898
899 static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
900 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
901 struct tcp_md5sig_key *key, u8 tclass,
902 u32 label)
903 {
904 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
905 tclass, label);
906 }
907
908 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
909 {
910 struct inet_timewait_sock *tw = inet_twsk(sk);
911 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
912
913 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
914 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
915 tcp_time_stamp + tcptw->tw_ts_offset,
916 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
917 tw->tw_tclass, (tw->tw_flowlabel << 12));
918
919 inet_twsk_put(tw);
920 }
921
922 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
923 struct request_sock *req)
924 {
925 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
926 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
927 */
928 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
929 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
930 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
931 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
932 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
933 0, 0);
934 }
935
936
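/* For a segment arriving on a listening socket, look for a matching pending
 * connection request or an already established/time-wait socket created by
 * an earlier SYN, falling back to SYN-cookie validation for non-SYN
 * segments when syncookies are compiled in.
 */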
937 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
938 {
939 const struct tcphdr *th = tcp_hdr(skb);
940 struct request_sock *req;
941 struct sock *nsk;
942
943 /* Find possible connection requests. */
944 req = inet6_csk_search_req(sk, th->source,
945 &ipv6_hdr(skb)->saddr,
946 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
947 if (req) {
948 nsk = tcp_check_req(sk, skb, req, false);
949 if (!nsk)
950 reqsk_put(req);
951 return nsk;
952 }
953 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
954 &ipv6_hdr(skb)->saddr, th->source,
955 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
956 tcp_v6_iif(skb));
957
958 if (nsk) {
959 if (nsk->sk_state != TCP_TIME_WAIT) {
960 bh_lock_sock(nsk);
961 return nsk;
962 }
963 inet_twsk_put(inet_twsk(nsk));
964 return NULL;
965 }
966
967 #ifdef CONFIG_SYN_COOKIES
968 if (!th->syn)
969 sk = cookie_v6_check(sk, skb);
970 #endif
971 return sk;
972 }
973
974 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
975 {
976 if (skb->protocol == htons(ETH_P_IP))
977 return tcp_v4_conn_request(sk, skb);
978
979 if (!ipv6_unicast_destination(skb))
980 goto drop;
981
982 return tcp_conn_request(&tcp6_request_sock_ops,
983 &tcp_request_sock_ipv6_ops, sk, skb);
984
985 drop:
986 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
987 return 0; /* don't send reset */
988 }
989
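/* Create the child socket that completes the three-way handshake. For
 * v4-mapped peers the work is delegated to tcp_v4_syn_recv_sock() and the
 * result is patched to use the mapped operations; for native IPv6 the
 * addresses, options, routing state and (optionally) the MD5 key of the
 * listener are copied onto the new socket.
 */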
990 static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
991 struct request_sock *req,
992 struct dst_entry *dst)
993 {
994 struct inet_request_sock *ireq;
995 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
996 struct tcp6_sock *newtcp6sk;
997 struct inet_sock *newinet;
998 struct tcp_sock *newtp;
999 struct sock *newsk;
1000 #ifdef CONFIG_TCP_MD5SIG
1001 struct tcp_md5sig_key *key;
1002 #endif
1003 struct flowi6 fl6;
1004
1005 if (skb->protocol == htons(ETH_P_IP)) {
1006 /*
1007 * v6 mapped
1008 */
1009
1010 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1011
1012 if (!newsk)
1013 return NULL;
1014
1015 newtcp6sk = (struct tcp6_sock *)newsk;
1016 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1017
1018 newinet = inet_sk(newsk);
1019 newnp = inet6_sk(newsk);
1020 newtp = tcp_sk(newsk);
1021
1022 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1023
1024 newnp->saddr = newsk->sk_v6_rcv_saddr;
1025
1026 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1027 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1028 #ifdef CONFIG_TCP_MD5SIG
1029 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1030 #endif
1031
1032 newnp->ipv6_ac_list = NULL;
1033 newnp->ipv6_fl_list = NULL;
1034 newnp->pktoptions = NULL;
1035 newnp->opt = NULL;
1036 newnp->mcast_oif = tcp_v6_iif(skb);
1037 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1038 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1039 if (np->repflow)
1040 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1041
1042 /*
1043 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1044 * here, tcp_create_openreq_child now does this for us, see the comment in
1045 * that function for the gory details. -acme
1046 */
1047
1048 /* This is a tricky place. Until this moment the IPv4 tcp code
1049 worked with the IPv6 icsk.icsk_af_ops.
1050 Sync it now.
1051 */
1052 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1053
1054 return newsk;
1055 }
1056
1057 ireq = inet_rsk(req);
1058
1059 if (sk_acceptq_is_full(sk))
1060 goto out_overflow;
1061
1062 if (!dst) {
1063 dst = inet6_csk_route_req(sk, &fl6, req);
1064 if (!dst)
1065 goto out;
1066 }
1067
1068 newsk = tcp_create_openreq_child(sk, req, skb);
1069 if (!newsk)
1070 goto out_nonewsk;
1071
1072 /*
1073 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1074 * count here, tcp_create_openreq_child now does this for us, see the
1075 * comment in that function for the gory details. -acme
1076 */
1077
1078 newsk->sk_gso_type = SKB_GSO_TCPV6;
1079 __ip6_dst_store(newsk, dst, NULL, NULL);
1080 inet6_sk_rx_dst_set(newsk, skb);
1081
1082 newtcp6sk = (struct tcp6_sock *)newsk;
1083 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1084
1085 newtp = tcp_sk(newsk);
1086 newinet = inet_sk(newsk);
1087 newnp = inet6_sk(newsk);
1088
1089 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1090
1091 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1092 newnp->saddr = ireq->ir_v6_loc_addr;
1093 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1094 newsk->sk_bound_dev_if = ireq->ir_iif;
1095
1096 ip6_set_txhash(newsk);
1097
1098 /* Now IPv6 options...
1099
1100 First: no IPv4 options.
1101 */
1102 newinet->inet_opt = NULL;
1103 newnp->ipv6_ac_list = NULL;
1104 newnp->ipv6_fl_list = NULL;
1105
1106 /* Clone RX bits */
1107 newnp->rxopt.all = np->rxopt.all;
1108
1109 /* Clone pktoptions received with SYN */
1110 newnp->pktoptions = NULL;
1111 if (ireq->pktopts) {
1112 newnp->pktoptions = skb_clone(ireq->pktopts,
1113 sk_gfp_atomic(sk, GFP_ATOMIC));
1114 consume_skb(ireq->pktopts);
1115 ireq->pktopts = NULL;
1116 if (newnp->pktoptions)
1117 skb_set_owner_r(newnp->pktoptions, newsk);
1118 }
1119 newnp->opt = NULL;
1120 newnp->mcast_oif = tcp_v6_iif(skb);
1121 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1122 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1123 if (np->repflow)
1124 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1125
1126 /* Clone native IPv6 options from the listening socket (if any)
1127
1128 Yes, keeping a reference count would be much more clever,
1129 but here we do one more thing: reattach optmem
1130 to newsk.
1131 */
1132 if (np->opt)
1133 newnp->opt = ipv6_dup_options(newsk, np->opt);
1134
1135 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1136 if (newnp->opt)
1137 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1138 newnp->opt->opt_flen);
1139
1140 tcp_ca_openreq_child(newsk, dst);
1141
1142 tcp_sync_mss(newsk, dst_mtu(dst));
1143 newtp->advmss = dst_metric_advmss(dst);
1144 if (tcp_sk(sk)->rx_opt.user_mss &&
1145 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1146 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1147
1148 tcp_initialize_rcv_mss(newsk);
1149
1150 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1151 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1152
1153 #ifdef CONFIG_TCP_MD5SIG
1154 /* Copy over the MD5 key from the original socket */
1155 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1156 if (key) {
1157 /* We're using one, so create a matching key
1158 * on the newsk structure. If we fail to get
1159 * memory, then we end up not copying the key
1160 * across. Shucks.
1161 */
1162 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1163 AF_INET6, key->key, key->keylen,
1164 sk_gfp_atomic(sk, GFP_ATOMIC));
1165 }
1166 #endif
1167
1168 if (__inet_inherit_port(sk, newsk) < 0) {
1169 inet_csk_prepare_forced_close(newsk);
1170 tcp_done(newsk);
1171 goto out;
1172 }
1173 __inet_hash(newsk, NULL);
1174
1175 return newsk;
1176
1177 out_overflow:
1178 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1179 out_nonewsk:
1180 dst_release(dst);
1181 out:
1182 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1183 return NULL;
1184 }
1185
1186 /* The socket must have its spinlock held when we get
1187 * here.
1188 *
1189 * We have a potential double-lock case here, so even when
1190 * doing backlog processing we use the BH locking scheme.
1191 * This is because we cannot sleep with the original spinlock
1192 * held.
1193 */
1194 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1195 {
1196 struct ipv6_pinfo *np = inet6_sk(sk);
1197 struct tcp_sock *tp;
1198 struct sk_buff *opt_skb = NULL;
1199
1200 /* Imagine: the socket is IPv6. An IPv4 packet arrives,
1201 goes to the IPv4 receive handler and is backlogged.
1202 From the backlog it always goes here. Kerboom...
1203 Fortunately, tcp_rcv_established and rcv_established
1204 handle them correctly, but that is not the case with
1205 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1206 */
1207
1208 if (skb->protocol == htons(ETH_P_IP))
1209 return tcp_v4_do_rcv(sk, skb);
1210
1211 if (sk_filter(sk, skb))
1212 goto discard;
1213
1214 /*
1215 * socket locking is here for SMP purposes as backlog rcv
1216 * is currently called with bh processing disabled.
1217 */
1218
1219 /* Do Stevens' IPV6_PKTOPTIONS.
1220
1221 Yes, guys, this is the only place in our code where we
1222 can do it without affecting IPv4.
1223 The rest of the code is protocol independent,
1224 and I do not like the idea of uglifying IPv4.
1225
1226 Actually, the whole idea behind IPV6_PKTOPTIONS
1227 does not look very well thought out. For now we latch
1228 the options received in the last packet enqueued
1229 by tcp. Feel free to propose a better solution.
1230 --ANK (980728)
1231 */
1232 if (np->rxopt.all)
1233 opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1234
1235 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1236 struct dst_entry *dst = sk->sk_rx_dst;
1237
1238 sock_rps_save_rxhash(sk, skb);
1239 sk_mark_napi_id(sk, skb);
1240 if (dst) {
1241 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1242 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1243 dst_release(dst);
1244 sk->sk_rx_dst = NULL;
1245 }
1246 }
1247
1248 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1249 if (opt_skb)
1250 goto ipv6_pktoptions;
1251 return 0;
1252 }
1253
1254 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1255 goto csum_err;
1256
1257 if (sk->sk_state == TCP_LISTEN) {
1258 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1259 if (!nsk)
1260 goto discard;
1261
1262 /*
1263 * Queue it on the new socket if the new socket is active,
1264 * otherwise we just short-circuit this and continue with
1265 * the new socket.
1266 */
1267 if (nsk != sk) {
1268 sock_rps_save_rxhash(nsk, skb);
1269 sk_mark_napi_id(sk, skb);
1270 if (tcp_child_process(sk, nsk, skb))
1271 goto reset;
1272 if (opt_skb)
1273 __kfree_skb(opt_skb);
1274 return 0;
1275 }
1276 } else
1277 sock_rps_save_rxhash(sk, skb);
1278
1279 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1280 goto reset;
1281 if (opt_skb)
1282 goto ipv6_pktoptions;
1283 return 0;
1284
1285 reset:
1286 tcp_v6_send_reset(sk, skb);
1287 discard:
1288 if (opt_skb)
1289 __kfree_skb(opt_skb);
1290 kfree_skb(skb);
1291 return 0;
1292 csum_err:
1293 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1294 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1295 goto discard;
1296
1297
1298 ipv6_pktoptions:
1299 /* What does this mean? The skb reaches this point only if:
1300
1301 1. skb was enqueued by tcp.
1302 2. skb is added to the tail of the read queue, rather than out of order.
1303 3. the socket is not in a passive state.
1304 4. Finally, it really contains options, which the user wants to receive.
1305 */
1306 tp = tcp_sk(sk);
1307 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1308 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1309 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1310 np->mcast_oif = tcp_v6_iif(opt_skb);
1311 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1312 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1313 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1314 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1315 if (np->repflow)
1316 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1317 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1318 skb_set_owner_r(opt_skb, sk);
1319 opt_skb = xchg(&np->pktoptions, opt_skb);
1320 } else {
1321 __kfree_skb(opt_skb);
1322 opt_skb = xchg(&np->pktoptions, NULL);
1323 }
1324 }
1325
1326 kfree_skb(opt_skb);
1327 return 0;
1328 }
1329
1330 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1331 const struct tcphdr *th)
1332 {
1333 /* This is tricky: we move IP6CB at its correct location into
1334 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1335 * _decode_session6() uses IP6CB().
1336 * barrier() makes sure compiler won't play aliasing games.
1337 */
1338 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1339 sizeof(struct inet6_skb_parm));
1340 barrier();
1341
1342 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1343 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1344 skb->len - th->doff*4);
1345 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1346 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1347 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1348 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1349 TCP_SKB_CB(skb)->sacked = 0;
1350 }
1351
1352 static void tcp_v6_restore_cb(struct sk_buff *skb)
1353 {
1354 /* We need to move header back to the beginning if xfrm6_policy_check()
1355 * and tcp_v6_fill_cb() are going to be called again.
1356 */
1357 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1358 sizeof(struct inet6_skb_parm));
1359 }
1360
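/* Main receive path, registered as the IPPROTO_TCP handler: validate the
 * header and checksum, look up the owning socket, fill the TCP control
 * block, and either process the segment directly, queue it to the
 * prequeue/backlog, or generate a reset or time-wait reply.
 */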
1361 static int tcp_v6_rcv(struct sk_buff *skb)
1362 {
1363 const struct tcphdr *th;
1364 const struct ipv6hdr *hdr;
1365 struct sock *sk;
1366 int ret;
1367 struct net *net = dev_net(skb->dev);
1368
1369 if (skb->pkt_type != PACKET_HOST)
1370 goto discard_it;
1371
1372 /*
1373 * Count it even if it's bad.
1374 */
1375 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1376
1377 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1378 goto discard_it;
1379
1380 th = tcp_hdr(skb);
1381
1382 if (th->doff < sizeof(struct tcphdr)/4)
1383 goto bad_packet;
1384 if (!pskb_may_pull(skb, th->doff*4))
1385 goto discard_it;
1386
1387 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1388 goto csum_error;
1389
1390 th = tcp_hdr(skb);
1391 hdr = ipv6_hdr(skb);
1392
1393 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
1394 inet6_iif(skb));
1395 if (!sk)
1396 goto no_tcp_socket;
1397
1398 process:
1399 if (sk->sk_state == TCP_TIME_WAIT)
1400 goto do_time_wait;
1401
1402 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1403 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1404 goto discard_and_relse;
1405 }
1406
1407 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1408 goto discard_and_relse;
1409
1410 tcp_v6_fill_cb(skb, hdr, th);
1411
1412 #ifdef CONFIG_TCP_MD5SIG
1413 if (tcp_v6_inbound_md5_hash(sk, skb))
1414 goto discard_and_relse;
1415 #endif
1416
1417 if (sk_filter(sk, skb))
1418 goto discard_and_relse;
1419
1420 sk_incoming_cpu_update(sk);
1421 skb->dev = NULL;
1422
1423 bh_lock_sock_nested(sk);
1424 ret = 0;
1425 if (!sock_owned_by_user(sk)) {
1426 if (!tcp_prequeue(sk, skb))
1427 ret = tcp_v6_do_rcv(sk, skb);
1428 } else if (unlikely(sk_add_backlog(sk, skb,
1429 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1430 bh_unlock_sock(sk);
1431 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1432 goto discard_and_relse;
1433 }
1434 bh_unlock_sock(sk);
1435
1436 sock_put(sk);
1437 return ret ? -1 : 0;
1438
1439 no_tcp_socket:
1440 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1441 goto discard_it;
1442
1443 tcp_v6_fill_cb(skb, hdr, th);
1444
1445 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1446 csum_error:
1447 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1448 bad_packet:
1449 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1450 } else {
1451 tcp_v6_send_reset(NULL, skb);
1452 }
1453
1454 discard_it:
1455 kfree_skb(skb);
1456 return 0;
1457
1458 discard_and_relse:
1459 sock_put(sk);
1460 goto discard_it;
1461
1462 do_time_wait:
1463 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1464 inet_twsk_put(inet_twsk(sk));
1465 goto discard_it;
1466 }
1467
1468 tcp_v6_fill_cb(skb, hdr, th);
1469
1470 if (skb->len < (th->doff<<2)) {
1471 inet_twsk_put(inet_twsk(sk));
1472 goto bad_packet;
1473 }
1474 if (tcp_checksum_complete(skb)) {
1475 inet_twsk_put(inet_twsk(sk));
1476 goto csum_error;
1477 }
1478
1479 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1480 case TCP_TW_SYN:
1481 {
1482 struct sock *sk2;
1483
1484 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1485 &ipv6_hdr(skb)->saddr, th->source,
1486 &ipv6_hdr(skb)->daddr,
1487 ntohs(th->dest), tcp_v6_iif(skb));
1488 if (sk2) {
1489 struct inet_timewait_sock *tw = inet_twsk(sk);
1490 inet_twsk_deschedule(tw);
1491 inet_twsk_put(tw);
1492 sk = sk2;
1493 tcp_v6_restore_cb(skb);
1494 goto process;
1495 }
1496 /* Fall through to ACK */
1497 }
1498 case TCP_TW_ACK:
1499 tcp_v6_timewait_ack(sk, skb);
1500 break;
1501 case TCP_TW_RST:
1502 tcp_v6_restore_cb(skb);
1503 goto no_tcp_socket;
1504 case TCP_TW_SUCCESS:
1505 ;
1506 }
1507 goto discard_it;
1508 }
1509
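/* Early demux, run before routing: if the segment belongs to an established
 * socket, attach that socket and its cached rx dst to the skb so the normal
 * receive path can avoid a second lookup.
 */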
1510 static void tcp_v6_early_demux(struct sk_buff *skb)
1511 {
1512 const struct ipv6hdr *hdr;
1513 const struct tcphdr *th;
1514 struct sock *sk;
1515
1516 if (skb->pkt_type != PACKET_HOST)
1517 return;
1518
1519 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1520 return;
1521
1522 hdr = ipv6_hdr(skb);
1523 th = tcp_hdr(skb);
1524
1525 if (th->doff < sizeof(struct tcphdr) / 4)
1526 return;
1527
1528 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1529 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1530 &hdr->saddr, th->source,
1531 &hdr->daddr, ntohs(th->dest),
1532 inet6_iif(skb));
1533 if (sk) {
1534 skb->sk = sk;
1535 skb->destructor = sock_edemux;
1536 if (sk_fullsock(sk)) {
1537 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1538
1539 if (dst)
1540 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1541 if (dst &&
1542 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1543 skb_dst_set_noref(skb, dst);
1544 }
1545 }
1546 }
1547
1548 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1549 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1550 .twsk_unique = tcp_twsk_unique,
1551 .twsk_destructor = tcp_twsk_destructor,
1552 };
1553
1554 static const struct inet_connection_sock_af_ops ipv6_specific = {
1555 .queue_xmit = inet6_csk_xmit,
1556 .send_check = tcp_v6_send_check,
1557 .rebuild_header = inet6_sk_rebuild_header,
1558 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1559 .conn_request = tcp_v6_conn_request,
1560 .syn_recv_sock = tcp_v6_syn_recv_sock,
1561 .net_header_len = sizeof(struct ipv6hdr),
1562 .net_frag_header_len = sizeof(struct frag_hdr),
1563 .setsockopt = ipv6_setsockopt,
1564 .getsockopt = ipv6_getsockopt,
1565 .addr2sockaddr = inet6_csk_addr2sockaddr,
1566 .sockaddr_len = sizeof(struct sockaddr_in6),
1567 .bind_conflict = inet6_csk_bind_conflict,
1568 #ifdef CONFIG_COMPAT
1569 .compat_setsockopt = compat_ipv6_setsockopt,
1570 .compat_getsockopt = compat_ipv6_getsockopt,
1571 #endif
1572 .mtu_reduced = tcp_v6_mtu_reduced,
1573 };
1574
1575 #ifdef CONFIG_TCP_MD5SIG
1576 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1577 .md5_lookup = tcp_v6_md5_lookup,
1578 .calc_md5_hash = tcp_v6_md5_hash_skb,
1579 .md5_parse = tcp_v6_parse_md5_keys,
1580 };
1581 #endif
1582
1583 /*
1584 * TCP over IPv4 via INET6 API
1585 */
1586 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1587 .queue_xmit = ip_queue_xmit,
1588 .send_check = tcp_v4_send_check,
1589 .rebuild_header = inet_sk_rebuild_header,
1590 .sk_rx_dst_set = inet_sk_rx_dst_set,
1591 .conn_request = tcp_v6_conn_request,
1592 .syn_recv_sock = tcp_v6_syn_recv_sock,
1593 .net_header_len = sizeof(struct iphdr),
1594 .setsockopt = ipv6_setsockopt,
1595 .getsockopt = ipv6_getsockopt,
1596 .addr2sockaddr = inet6_csk_addr2sockaddr,
1597 .sockaddr_len = sizeof(struct sockaddr_in6),
1598 .bind_conflict = inet6_csk_bind_conflict,
1599 #ifdef CONFIG_COMPAT
1600 .compat_setsockopt = compat_ipv6_setsockopt,
1601 .compat_getsockopt = compat_ipv6_getsockopt,
1602 #endif
1603 .mtu_reduced = tcp_v4_mtu_reduced,
1604 };
1605
1606 #ifdef CONFIG_TCP_MD5SIG
1607 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1608 .md5_lookup = tcp_v4_md5_lookup,
1609 .calc_md5_hash = tcp_v4_md5_hash_skb,
1610 .md5_parse = tcp_v6_parse_md5_keys,
1611 };
1612 #endif
1613
1614 /* NOTE: A lot of things are set to zero explicitly by the call to
1615 * sk_alloc(), so they need not be done here.
1616 */
1617 static int tcp_v6_init_sock(struct sock *sk)
1618 {
1619 struct inet_connection_sock *icsk = inet_csk(sk);
1620
1621 tcp_init_sock(sk);
1622
1623 icsk->icsk_af_ops = &ipv6_specific;
1624
1625 #ifdef CONFIG_TCP_MD5SIG
1626 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1627 #endif
1628
1629 return 0;
1630 }
1631
1632 static void tcp_v6_destroy_sock(struct sock *sk)
1633 {
1634 tcp_v4_destroy_sock(sk);
1635 inet6_destroy_sock(sk);
1636 }
1637
1638 #ifdef CONFIG_PROC_FS
1639 /* Proc filesystem TCPv6 sock list dumping. */
1640 static void get_openreq6(struct seq_file *seq,
1641 struct request_sock *req, int i, kuid_t uid)
1642 {
1643 long ttd = req->rsk_timer.expires - jiffies;
1644 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1645 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1646
1647 if (ttd < 0)
1648 ttd = 0;
1649
1650 seq_printf(seq,
1651 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1652 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1653 i,
1654 src->s6_addr32[0], src->s6_addr32[1],
1655 src->s6_addr32[2], src->s6_addr32[3],
1656 inet_rsk(req)->ir_num,
1657 dest->s6_addr32[0], dest->s6_addr32[1],
1658 dest->s6_addr32[2], dest->s6_addr32[3],
1659 ntohs(inet_rsk(req)->ir_rmt_port),
1660 TCP_SYN_RECV,
1661 0, 0, /* could print option size, but that is af dependent. */
1662 1, /* timers active (only the expire timer) */
1663 jiffies_to_clock_t(ttd),
1664 req->num_timeout,
1665 from_kuid_munged(seq_user_ns(seq), uid),
1666 0, /* non standard timer */
1667 0, /* open_requests have no inode */
1668 0, req);
1669 }
1670
1671 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1672 {
1673 const struct in6_addr *dest, *src;
1674 __u16 destp, srcp;
1675 int timer_active;
1676 unsigned long timer_expires;
1677 const struct inet_sock *inet = inet_sk(sp);
1678 const struct tcp_sock *tp = tcp_sk(sp);
1679 const struct inet_connection_sock *icsk = inet_csk(sp);
1680 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
1681
1682 dest = &sp->sk_v6_daddr;
1683 src = &sp->sk_v6_rcv_saddr;
1684 destp = ntohs(inet->inet_dport);
1685 srcp = ntohs(inet->inet_sport);
1686
1687 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1688 timer_active = 1;
1689 timer_expires = icsk->icsk_timeout;
1690 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1691 timer_active = 4;
1692 timer_expires = icsk->icsk_timeout;
1693 } else if (timer_pending(&sp->sk_timer)) {
1694 timer_active = 2;
1695 timer_expires = sp->sk_timer.expires;
1696 } else {
1697 timer_active = 0;
1698 timer_expires = jiffies;
1699 }
1700
1701 seq_printf(seq,
1702 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1703 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1704 i,
1705 src->s6_addr32[0], src->s6_addr32[1],
1706 src->s6_addr32[2], src->s6_addr32[3], srcp,
1707 dest->s6_addr32[0], dest->s6_addr32[1],
1708 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1709 sp->sk_state,
1710 tp->write_seq-tp->snd_una,
1711 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1712 timer_active,
1713 jiffies_delta_to_clock_t(timer_expires - jiffies),
1714 icsk->icsk_retransmits,
1715 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1716 icsk->icsk_probes_out,
1717 sock_i_ino(sp),
1718 atomic_read(&sp->sk_refcnt), sp,
1719 jiffies_to_clock_t(icsk->icsk_rto),
1720 jiffies_to_clock_t(icsk->icsk_ack.ato),
1721 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1722 tp->snd_cwnd,
1723 sp->sk_state == TCP_LISTEN ?
1724 (fastopenq ? fastopenq->max_qlen : 0) :
1725 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1726 );
1727 }
1728
1729 static void get_timewait6_sock(struct seq_file *seq,
1730 struct inet_timewait_sock *tw, int i)
1731 {
1732 long delta = tw->tw_timer.expires - jiffies;
1733 const struct in6_addr *dest, *src;
1734 __u16 destp, srcp;
1735
1736 dest = &tw->tw_v6_daddr;
1737 src = &tw->tw_v6_rcv_saddr;
1738 destp = ntohs(tw->tw_dport);
1739 srcp = ntohs(tw->tw_sport);
1740
1741 seq_printf(seq,
1742 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1743 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1744 i,
1745 src->s6_addr32[0], src->s6_addr32[1],
1746 src->s6_addr32[2], src->s6_addr32[3], srcp,
1747 dest->s6_addr32[0], dest->s6_addr32[1],
1748 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1749 tw->tw_substate, 0, 0,
1750 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1751 atomic_read(&tw->tw_refcnt), tw);
1752 }
1753
1754 static int tcp6_seq_show(struct seq_file *seq, void *v)
1755 {
1756 struct tcp_iter_state *st;
1757 struct sock *sk = v;
1758
1759 if (v == SEQ_START_TOKEN) {
1760 seq_puts(seq,
1761 " sl "
1762 "local_address "
1763 "remote_address "
1764 "st tx_queue rx_queue tr tm->when retrnsmt"
1765 " uid timeout inode\n");
1766 goto out;
1767 }
1768 st = seq->private;
1769
1770 switch (st->state) {
1771 case TCP_SEQ_STATE_LISTENING:
1772 case TCP_SEQ_STATE_ESTABLISHED:
1773 if (sk->sk_state == TCP_TIME_WAIT)
1774 get_timewait6_sock(seq, v, st->num);
1775 else
1776 get_tcp6_sock(seq, v, st->num);
1777 break;
1778 case TCP_SEQ_STATE_OPENREQ:
1779 get_openreq6(seq, v, st->num, st->uid);
1780 break;
1781 }
1782 out:
1783 return 0;
1784 }
1785
1786 static const struct file_operations tcp6_afinfo_seq_fops = {
1787 .owner = THIS_MODULE,
1788 .open = tcp_seq_open,
1789 .read = seq_read,
1790 .llseek = seq_lseek,
1791 .release = seq_release_net
1792 };
1793
1794 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1795 .name = "tcp6",
1796 .family = AF_INET6,
1797 .seq_fops = &tcp6_afinfo_seq_fops,
1798 .seq_ops = {
1799 .show = tcp6_seq_show,
1800 },
1801 };
1802
1803 int __net_init tcp6_proc_init(struct net *net)
1804 {
1805 return tcp_proc_register(net, &tcp6_seq_afinfo);
1806 }
1807
1808 void tcp6_proc_exit(struct net *net)
1809 {
1810 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1811 }
1812 #endif
1813
1814 static void tcp_v6_clear_sk(struct sock *sk, int size)
1815 {
1816 struct inet_sock *inet = inet_sk(sk);
1817
1818 /* we do not want to clear pinet6 field, because of RCU lookups */
1819 sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1820
1821 size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1822 memset(&inet->pinet6 + 1, 0, size);
1823 }
1824
1825 struct proto tcpv6_prot = {
1826 .name = "TCPv6",
1827 .owner = THIS_MODULE,
1828 .close = tcp_close,
1829 .connect = tcp_v6_connect,
1830 .disconnect = tcp_disconnect,
1831 .accept = inet_csk_accept,
1832 .ioctl = tcp_ioctl,
1833 .init = tcp_v6_init_sock,
1834 .destroy = tcp_v6_destroy_sock,
1835 .shutdown = tcp_shutdown,
1836 .setsockopt = tcp_setsockopt,
1837 .getsockopt = tcp_getsockopt,
1838 .recvmsg = tcp_recvmsg,
1839 .sendmsg = tcp_sendmsg,
1840 .sendpage = tcp_sendpage,
1841 .backlog_rcv = tcp_v6_do_rcv,
1842 .release_cb = tcp_release_cb,
1843 .hash = inet_hash,
1844 .unhash = inet_unhash,
1845 .get_port = inet_csk_get_port,
1846 .enter_memory_pressure = tcp_enter_memory_pressure,
1847 .stream_memory_free = tcp_stream_memory_free,
1848 .sockets_allocated = &tcp_sockets_allocated,
1849 .memory_allocated = &tcp_memory_allocated,
1850 .memory_pressure = &tcp_memory_pressure,
1851 .orphan_count = &tcp_orphan_count,
1852 .sysctl_mem = sysctl_tcp_mem,
1853 .sysctl_wmem = sysctl_tcp_wmem,
1854 .sysctl_rmem = sysctl_tcp_rmem,
1855 .max_header = MAX_TCP_HEADER,
1856 .obj_size = sizeof(struct tcp6_sock),
1857 .slab_flags = SLAB_DESTROY_BY_RCU,
1858 .twsk_prot = &tcp6_timewait_sock_ops,
1859 .rsk_prot = &tcp6_request_sock_ops,
1860 .h.hashinfo = &tcp_hashinfo,
1861 .no_autobind = true,
1862 #ifdef CONFIG_COMPAT
1863 .compat_setsockopt = compat_tcp_setsockopt,
1864 .compat_getsockopt = compat_tcp_getsockopt,
1865 #endif
1866 #ifdef CONFIG_MEMCG_KMEM
1867 .proto_cgroup = tcp_proto_cgroup,
1868 #endif
1869 .clear_sk = tcp_v6_clear_sk,
1870 };
1871
1872 static const struct inet6_protocol tcpv6_protocol = {
1873 .early_demux = tcp_v6_early_demux,
1874 .handler = tcp_v6_rcv,
1875 .err_handler = tcp_v6_err,
1876 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1877 };
1878
1879 static struct inet_protosw tcpv6_protosw = {
1880 .type = SOCK_STREAM,
1881 .protocol = IPPROTO_TCP,
1882 .prot = &tcpv6_prot,
1883 .ops = &inet6_stream_ops,
1884 .flags = INET_PROTOSW_PERMANENT |
1885 INET_PROTOSW_ICSK,
1886 };
1887
1888 static int __net_init tcpv6_net_init(struct net *net)
1889 {
1890 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1891 SOCK_RAW, IPPROTO_TCP, net);
1892 }
1893
1894 static void __net_exit tcpv6_net_exit(struct net *net)
1895 {
1896 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1897 }
1898
1899 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1900 {
1901 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1902 }
1903
1904 static struct pernet_operations tcpv6_net_ops = {
1905 .init = tcpv6_net_init,
1906 .exit = tcpv6_net_exit,
1907 .exit_batch = tcpv6_net_exit_batch,
1908 };
1909
1910 int __init tcpv6_init(void)
1911 {
1912 int ret;
1913
1914 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1915 if (ret)
1916 goto out;
1917
1918 /* register inet6 protocol */
1919 ret = inet6_register_protosw(&tcpv6_protosw);
1920 if (ret)
1921 goto out_tcpv6_protocol;
1922
1923 ret = register_pernet_subsys(&tcpv6_net_ops);
1924 if (ret)
1925 goto out_tcpv6_protosw;
1926 out:
1927 return ret;
1928
1929 out_tcpv6_protosw:
1930 inet6_unregister_protosw(&tcpv6_protosw);
1931 out_tcpv6_protocol:
1932 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1933 goto out;
1934 }
1935
1936 void tcpv6_exit(void)
1937 {
1938 unregister_pernet_subsys(&tcpv6_net_ops);
1939 inet6_unregister_protosw(&tcpv6_protosw);
1940 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1941 }