/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

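/* Cache the inbound route on the socket so the established fast path can
 * reuse it.  The ifindex and rt6 cookie let later packets check that the
 * cached dst is still valid for this flow.
 */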
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

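/* Set up an outgoing TCP connection over IPv6: validate the destination,
 * resolve flow labels and scope ids, route the flow, and fall back to
 * tcp_v4_connect() for IPv4-mapped destinations.
 */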
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

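/* PMTU discovery: an ICMPV6_PKT_TOOBIG message told us the path MTU
 * shrank.  Re-sync the MSS and retransmit outstanding data if needed.
 */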
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

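/* ICMPv6 error handler: called when an ICMPv6 error (unreachable,
 * packet-too-big, redirect, ...) arrives for one of our TCP segments.
 */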
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

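/* Transmit a SYN-ACK in answer to a connection request, routing it first
 * if no dst was supplied and reflecting the client's flow label when the
 * listener asked for that.
 */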
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      bool attach_req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, attach_req);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
			       np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

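/* Compute the TCP-MD5 signature over the pseudo-header, TCP header and
 * payload of skb.  Addresses come from the socket when one is given,
 * otherwise from the packet itself.
 */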
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

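/* Validate the TCP-MD5 option on an incoming segment against the key we
 * expect for the peer.  Returns true if the segment must be dropped.
 */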
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

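/* Fill a freshly allocated request sock from the incoming SYN: copy the
 * addresses, pin link-local traffic to its interface, and stash the skb
 * when the listener wants its IPv6 packet options.
 */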
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};

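/* Build and transmit a stand-alone ACK or RST (no socket state attached)
 * in reply to skb, via the per-netns control socket.  Used for resets,
 * TIME_WAIT ACKs and request-socket ACKs below.
 */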
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass the control socket to ip6_dst_lookup_flow, even for a RST;
	 * the underlying function uses it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key, and no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

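/* ACK a segment that arrived for a connection now in TIME_WAIT. */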
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}

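/* For a non-SYN segment hitting a listener, try to validate it as the
 * ACK of a SYN cookie; on success this yields the new child socket.
 */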
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

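/* Handle an incoming SYN on a listening socket, delegating IPv4 packets
 * on dual-stack sockets to tcp_v4_conn_request().
 */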
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}

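/* Create the child socket for a completed handshake: clone the listener,
 * install IPv6 addressing, options and the route, and hash the new sock.
 */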
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt	  = NULL;
		newnp->mcast_oif  = tcp_v6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions)
				skb_set_owner_r(newnp->pktoptions, newsk);
		}
	}

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   can do this without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* You may ask, what is this?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

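/* Main IPv6 TCP receive path: validate the headers and checksum, look up
 * the owning socket, and hand the segment to whatever owns it (a request
 * sock, a TIME_WAIT sock, or a full socket via tcp_v6_do_rcv()).
 */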
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

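/* Early demux: look up an established socket before full IP processing so
 * the socket and its cached rx dst can be attached to the skb up front.
 */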
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *	 sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.clear_sk		= tcp_v6_clear_sk,
	.diag_destroy		= tcp_abort,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}