net/ipv6/tcp_ipv6.c
1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * Based on:
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64 #include <net/secure_seq.h>
65 #include <net/tcp_memcontrol.h>
66
67 #include <asm/uaccess.h>
68
69 #include <linux/proc_fs.h>
70 #include <linux/seq_file.h>
71
72 #include <linux/crypto.h>
73 #include <linux/scatterlist.h>
74
75 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
77 struct request_sock *req);
78
79 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
80 static void __tcp_v6_send_check(struct sk_buff *skb,
81 const struct in6_addr *saddr,
82 const struct in6_addr *daddr);
83
84 static const struct inet_connection_sock_af_ops ipv6_mapped;
85 static const struct inet_connection_sock_af_ops ipv6_specific;
86 #ifdef CONFIG_TCP_MD5SIG
87 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
88 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
89 #else
90 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
91 const struct in6_addr *addr)
92 {
93 return NULL;
94 }
95 #endif
96
97 static void tcp_v6_hash(struct sock *sk)
98 {
99 if (sk->sk_state != TCP_CLOSE) {
100 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
101 tcp_prot.hash(sk);
102 return;
103 }
104 local_bh_disable();
105 __inet6_hash(sk, NULL);
106 local_bh_enable();
107 }
108 }
109
110 static __inline__ __sum16 tcp_v6_check(int len,
111 const struct in6_addr *saddr,
112 const struct in6_addr *daddr,
113 __wsum base)
114 {
115 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
116 }
117
118 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
119 {
120 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
121 ipv6_hdr(skb)->saddr.s6_addr32,
122 tcp_hdr(skb)->dest,
123 tcp_hdr(skb)->source);
124 }
125
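/* Active open: validate the destination address, resolve the route and
 * source address, and send the initial SYN. IPv4-mapped destinations are
 * handed off to tcp_v4_connect().
 */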
126 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
127 int addr_len)
128 {
129 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
130 struct inet_sock *inet = inet_sk(sk);
131 struct inet_connection_sock *icsk = inet_csk(sk);
132 struct ipv6_pinfo *np = inet6_sk(sk);
133 struct tcp_sock *tp = tcp_sk(sk);
134 struct in6_addr *saddr = NULL, *final_p, final;
135 struct rt6_info *rt;
136 struct flowi6 fl6;
137 struct dst_entry *dst;
138 int addr_type;
139 int err;
140
141 if (addr_len < SIN6_LEN_RFC2133)
142 return -EINVAL;
143
144 if (usin->sin6_family != AF_INET6)
145 return -EAFNOSUPPORT;
146
147 memset(&fl6, 0, sizeof(fl6));
148
149 if (np->sndflow) {
150 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
151 IP6_ECN_flow_init(fl6.flowlabel);
152 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
153 struct ip6_flowlabel *flowlabel;
154 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
155 if (flowlabel == NULL)
156 return -EINVAL;
157 usin->sin6_addr = flowlabel->dst;
158 fl6_sock_release(flowlabel);
159 }
160 }
161
162 /*
163 * connect() to INADDR_ANY means loopback (BSD'ism).
164 */
165
166 	if (ipv6_addr_any(&usin->sin6_addr))
167 usin->sin6_addr.s6_addr[15] = 0x1;
168
169 addr_type = ipv6_addr_type(&usin->sin6_addr);
170
171 	if (addr_type & IPV6_ADDR_MULTICAST)
172 return -ENETUNREACH;
173
174 if (addr_type&IPV6_ADDR_LINKLOCAL) {
175 if (addr_len >= sizeof(struct sockaddr_in6) &&
176 usin->sin6_scope_id) {
177 /* If interface is set while binding, indices
178 * must coincide.
179 */
180 if (sk->sk_bound_dev_if &&
181 sk->sk_bound_dev_if != usin->sin6_scope_id)
182 return -EINVAL;
183
184 sk->sk_bound_dev_if = usin->sin6_scope_id;
185 }
186
187 /* Connect to link-local address requires an interface */
188 if (!sk->sk_bound_dev_if)
189 return -EINVAL;
190 }
191
192 if (tp->rx_opt.ts_recent_stamp &&
193 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
194 tp->rx_opt.ts_recent = 0;
195 tp->rx_opt.ts_recent_stamp = 0;
196 tp->write_seq = 0;
197 }
198
199 np->daddr = usin->sin6_addr;
200 np->flow_label = fl6.flowlabel;
201
202 /*
203 * TCP over IPv4
204 */
205
206 if (addr_type == IPV6_ADDR_MAPPED) {
207 u32 exthdrlen = icsk->icsk_ext_hdr_len;
208 struct sockaddr_in sin;
209
210 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
211
212 if (__ipv6_only_sock(sk))
213 return -ENETUNREACH;
214
215 sin.sin_family = AF_INET;
216 sin.sin_port = usin->sin6_port;
217 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
218
219 icsk->icsk_af_ops = &ipv6_mapped;
220 sk->sk_backlog_rcv = tcp_v4_do_rcv;
221 #ifdef CONFIG_TCP_MD5SIG
222 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
223 #endif
224
225 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
226
227 if (err) {
228 icsk->icsk_ext_hdr_len = exthdrlen;
229 icsk->icsk_af_ops = &ipv6_specific;
230 sk->sk_backlog_rcv = tcp_v6_do_rcv;
231 #ifdef CONFIG_TCP_MD5SIG
232 tp->af_specific = &tcp_sock_ipv6_specific;
233 #endif
234 goto failure;
235 } else {
236 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
237 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
238 &np->rcv_saddr);
239 }
240
241 return err;
242 }
243
244 if (!ipv6_addr_any(&np->rcv_saddr))
245 saddr = &np->rcv_saddr;
246
247 fl6.flowi6_proto = IPPROTO_TCP;
248 fl6.daddr = np->daddr;
249 fl6.saddr = saddr ? *saddr : np->saddr;
250 fl6.flowi6_oif = sk->sk_bound_dev_if;
251 fl6.flowi6_mark = sk->sk_mark;
252 fl6.fl6_dport = usin->sin6_port;
253 fl6.fl6_sport = inet->inet_sport;
254
255 final_p = fl6_update_dst(&fl6, np->opt, &final);
256
257 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
258
259 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
260 if (IS_ERR(dst)) {
261 err = PTR_ERR(dst);
262 goto failure;
263 }
264
265 if (saddr == NULL) {
266 saddr = &fl6.saddr;
267 np->rcv_saddr = *saddr;
268 }
269
270 /* set the source address */
271 np->saddr = *saddr;
272 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
273
274 sk->sk_gso_type = SKB_GSO_TCPV6;
275 __ip6_dst_store(sk, dst, NULL, NULL);
276
277 rt = (struct rt6_info *) dst;
278 if (tcp_death_row.sysctl_tw_recycle &&
279 !tp->rx_opt.ts_recent_stamp &&
280 ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
281 struct inet_peer *peer = rt6_get_peer(rt);
282 /*
283 * VJ's idea. We save last timestamp seen from
284 * the destination in peer table, when entering state
285 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
286 * when trying new connection.
287 */
288 if (peer) {
289 inet_peer_refcheck(peer);
290 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
291 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
292 tp->rx_opt.ts_recent = peer->tcp_ts;
293 }
294 }
295 }
296
297 icsk->icsk_ext_hdr_len = 0;
298 if (np->opt)
299 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
300 np->opt->opt_nflen);
301
302 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
303
304 inet->inet_dport = usin->sin6_port;
305
306 tcp_set_state(sk, TCP_SYN_SENT);
307 err = inet6_hash_connect(&tcp_death_row, sk);
308 if (err)
309 goto late_failure;
310
311 if (!tp->write_seq)
312 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
313 np->daddr.s6_addr32,
314 inet->inet_sport,
315 inet->inet_dport);
316
317 err = tcp_connect(sk);
318 if (err)
319 goto late_failure;
320
321 return 0;
322
323 late_failure:
324 tcp_set_state(sk, TCP_CLOSE);
325 __sk_dst_reset(sk);
326 failure:
327 inet->inet_dport = 0;
328 sk->sk_route_caps = 0;
329 return err;
330 }
331
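/* ICMPv6 error handler: look up the socket the offending segment belongs
 * to, adjust the path MTU on ICMPV6_PKT_TOOBIG, drop a matching request
 * sock on a listener, or report the error to the socket owner.
 */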
332 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
333 u8 type, u8 code, int offset, __be32 info)
334 {
335 const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
336 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
337 struct ipv6_pinfo *np;
338 struct sock *sk;
339 int err;
340 struct tcp_sock *tp;
341 __u32 seq;
342 struct net *net = dev_net(skb->dev);
343
344 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
345 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
346
347 if (sk == NULL) {
348 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
349 ICMP6_MIB_INERRORS);
350 return;
351 }
352
353 if (sk->sk_state == TCP_TIME_WAIT) {
354 inet_twsk_put(inet_twsk(sk));
355 return;
356 }
357
358 bh_lock_sock(sk);
359 if (sock_owned_by_user(sk))
360 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
361
362 if (sk->sk_state == TCP_CLOSE)
363 goto out;
364
365 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
366 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
367 goto out;
368 }
369
370 tp = tcp_sk(sk);
371 seq = ntohl(th->seq);
372 if (sk->sk_state != TCP_LISTEN &&
373 !between(seq, tp->snd_una, tp->snd_nxt)) {
374 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
375 goto out;
376 }
377
378 np = inet6_sk(sk);
379
380 if (type == ICMPV6_PKT_TOOBIG) {
381 struct dst_entry *dst;
382
383 if (sock_owned_by_user(sk))
384 goto out;
385 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
386 goto out;
387
388 /* icmp should have updated the destination cache entry */
389 dst = __sk_dst_check(sk, np->dst_cookie);
390
391 if (dst == NULL) {
392 struct inet_sock *inet = inet_sk(sk);
393 struct flowi6 fl6;
394
395 /* BUGGG_FUTURE: Again, it is not clear how
396 to handle rthdr case. Ignore this complexity
397 for now.
398 */
399 memset(&fl6, 0, sizeof(fl6));
400 fl6.flowi6_proto = IPPROTO_TCP;
401 fl6.daddr = np->daddr;
402 fl6.saddr = np->saddr;
403 fl6.flowi6_oif = sk->sk_bound_dev_if;
404 fl6.flowi6_mark = sk->sk_mark;
405 fl6.fl6_dport = inet->inet_dport;
406 fl6.fl6_sport = inet->inet_sport;
407 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
408
409 dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
410 if (IS_ERR(dst)) {
411 sk->sk_err_soft = -PTR_ERR(dst);
412 goto out;
413 }
414
415 } else
416 dst_hold(dst);
417
418 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
419 tcp_sync_mss(sk, dst_mtu(dst));
420 tcp_simple_retransmit(sk);
421 } /* else let the usual retransmit timer handle it */
422 dst_release(dst);
423 goto out;
424 }
425
426 icmpv6_err_convert(type, code, &err);
427
428 	/* Might be for a request_sock */
429 switch (sk->sk_state) {
430 struct request_sock *req, **prev;
431 case TCP_LISTEN:
432 if (sock_owned_by_user(sk))
433 goto out;
434
435 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
436 &hdr->saddr, inet6_iif(skb));
437 if (!req)
438 goto out;
439
440 /* ICMPs are not backlogged, hence we cannot get
441 * an established socket here.
442 */
443 WARN_ON(req->sk != NULL);
444
445 if (seq != tcp_rsk(req)->snt_isn) {
446 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
447 goto out;
448 }
449
450 inet_csk_reqsk_queue_drop(sk, req, prev);
451 goto out;
452
453 case TCP_SYN_SENT:
454 case TCP_SYN_RECV: /* Cannot happen.
455 			       It can, if SYNs are crossed. --ANK */
456 if (!sock_owned_by_user(sk)) {
457 sk->sk_err = err;
458 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
459
460 tcp_done(sk);
461 } else
462 sk->sk_err_soft = err;
463 goto out;
464 }
465
466 if (!sock_owned_by_user(sk) && np->recverr) {
467 sk->sk_err = err;
468 sk->sk_error_report(sk);
469 } else
470 sk->sk_err_soft = err;
471
472 out:
473 bh_unlock_sock(sk);
474 sock_put(sk);
475 }
476
477
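/* Build a SYN+ACK for the given request sock, route it to the peer and
 * transmit it with ip6_xmit().
 */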
478 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
479 struct request_values *rvp,
480 u16 queue_mapping)
481 {
482 struct inet6_request_sock *treq = inet6_rsk(req);
483 struct ipv6_pinfo *np = inet6_sk(sk);
484 struct sk_buff * skb;
485 struct ipv6_txoptions *opt = NULL;
486 struct in6_addr * final_p, final;
487 struct flowi6 fl6;
488 struct dst_entry *dst;
489 int err;
490
491 memset(&fl6, 0, sizeof(fl6));
492 fl6.flowi6_proto = IPPROTO_TCP;
493 fl6.daddr = treq->rmt_addr;
494 fl6.saddr = treq->loc_addr;
495 fl6.flowlabel = 0;
496 fl6.flowi6_oif = treq->iif;
497 fl6.flowi6_mark = sk->sk_mark;
498 fl6.fl6_dport = inet_rsk(req)->rmt_port;
499 fl6.fl6_sport = inet_rsk(req)->loc_port;
500 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
501
502 opt = np->opt;
503 final_p = fl6_update_dst(&fl6, opt, &final);
504
505 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
506 if (IS_ERR(dst)) {
507 err = PTR_ERR(dst);
508 dst = NULL;
509 goto done;
510 }
511 skb = tcp_make_synack(sk, dst, req, rvp);
512 err = -ENOMEM;
513 if (skb) {
514 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
515
516 fl6.daddr = treq->rmt_addr;
517 skb_set_queue_mapping(skb, queue_mapping);
518 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
519 err = net_xmit_eval(err);
520 }
521
522 done:
523 if (opt && opt != np->opt)
524 sock_kfree_s(sk, opt, opt->tot_len);
525 dst_release(dst);
526 return err;
527 }
528
529 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
530 struct request_values *rvp)
531 {
532 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
533 return tcp_v6_send_synack(sk, req, rvp, 0);
534 }
535
536 static void tcp_v6_reqsk_destructor(struct request_sock *req)
537 {
538 kfree_skb(inet6_rsk(req)->pktopts);
539 }
540
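/* TCP MD5 signature (RFC 2385) support: key lookup, setsockopt parsing
 * and hash computation over the IPv6 pseudo-header.
 */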
541 #ifdef CONFIG_TCP_MD5SIG
542 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
543 const struct in6_addr *addr)
544 {
545 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
546 }
547
548 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
549 struct sock *addr_sk)
550 {
551 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
552 }
553
554 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
555 struct request_sock *req)
556 {
557 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
558 }
559
560 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
561 int optlen)
562 {
563 struct tcp_md5sig cmd;
564 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
565
566 if (optlen < sizeof(cmd))
567 return -EINVAL;
568
569 if (copy_from_user(&cmd, optval, sizeof(cmd)))
570 return -EFAULT;
571
572 if (sin6->sin6_family != AF_INET6)
573 return -EINVAL;
574
575 if (!cmd.tcpm_keylen) {
576 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
577 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
578 AF_INET);
579 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
580 AF_INET6);
581 }
582
583 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
584 return -EINVAL;
585
586 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
587 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
588 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
589
590 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
591 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
592 }
593
594 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
595 const struct in6_addr *daddr,
596 const struct in6_addr *saddr, int nbytes)
597 {
598 struct tcp6_pseudohdr *bp;
599 struct scatterlist sg;
600
601 bp = &hp->md5_blk.ip6;
602 /* 1. TCP pseudo-header (RFC2460) */
603 bp->saddr = *saddr;
604 bp->daddr = *daddr;
605 bp->protocol = cpu_to_be32(IPPROTO_TCP);
606 bp->len = cpu_to_be32(nbytes);
607
608 sg_init_one(&sg, bp, sizeof(*bp));
609 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
610 }
611
612 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
613 const struct in6_addr *daddr, struct in6_addr *saddr,
614 const struct tcphdr *th)
615 {
616 struct tcp_md5sig_pool *hp;
617 struct hash_desc *desc;
618
619 hp = tcp_get_md5sig_pool();
620 if (!hp)
621 goto clear_hash_noput;
622 desc = &hp->md5_desc;
623
624 if (crypto_hash_init(desc))
625 goto clear_hash;
626 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
627 goto clear_hash;
628 if (tcp_md5_hash_header(hp, th))
629 goto clear_hash;
630 if (tcp_md5_hash_key(hp, key))
631 goto clear_hash;
632 if (crypto_hash_final(desc, md5_hash))
633 goto clear_hash;
634
635 tcp_put_md5sig_pool();
636 return 0;
637
638 clear_hash:
639 tcp_put_md5sig_pool();
640 clear_hash_noput:
641 memset(md5_hash, 0, 16);
642 return 1;
643 }
644
645 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
646 const struct sock *sk,
647 const struct request_sock *req,
648 const struct sk_buff *skb)
649 {
650 const struct in6_addr *saddr, *daddr;
651 struct tcp_md5sig_pool *hp;
652 struct hash_desc *desc;
653 const struct tcphdr *th = tcp_hdr(skb);
654
655 if (sk) {
656 saddr = &inet6_sk(sk)->saddr;
657 daddr = &inet6_sk(sk)->daddr;
658 } else if (req) {
659 saddr = &inet6_rsk(req)->loc_addr;
660 daddr = &inet6_rsk(req)->rmt_addr;
661 } else {
662 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
663 saddr = &ip6h->saddr;
664 daddr = &ip6h->daddr;
665 }
666
667 hp = tcp_get_md5sig_pool();
668 if (!hp)
669 goto clear_hash_noput;
670 desc = &hp->md5_desc;
671
672 if (crypto_hash_init(desc))
673 goto clear_hash;
674
675 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
676 goto clear_hash;
677 if (tcp_md5_hash_header(hp, th))
678 goto clear_hash;
679 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
680 goto clear_hash;
681 if (tcp_md5_hash_key(hp, key))
682 goto clear_hash;
683 if (crypto_hash_final(desc, md5_hash))
684 goto clear_hash;
685
686 tcp_put_md5sig_pool();
687 return 0;
688
689 clear_hash:
690 tcp_put_md5sig_pool();
691 clear_hash_noput:
692 memset(md5_hash, 0, 16);
693 return 1;
694 }
695
696 static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
697 {
698 const __u8 *hash_location = NULL;
699 struct tcp_md5sig_key *hash_expected;
700 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
701 const struct tcphdr *th = tcp_hdr(skb);
702 int genhash;
703 u8 newhash[16];
704
705 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
706 hash_location = tcp_parse_md5sig_option(th);
707
708 /* We've parsed the options - do we have a hash? */
709 if (!hash_expected && !hash_location)
710 return 0;
711
712 if (hash_expected && !hash_location) {
713 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
714 return 1;
715 }
716
717 if (!hash_expected && hash_location) {
718 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
719 return 1;
720 }
721
722 /* check the signature */
723 genhash = tcp_v6_md5_hash_skb(newhash,
724 hash_expected,
725 NULL, NULL, skb);
726
727 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
728 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
729 genhash ? "failed" : "mismatch",
730 &ip6h->saddr, ntohs(th->source),
731 &ip6h->daddr, ntohs(th->dest));
732 return 1;
733 }
734 return 0;
735 }
736 #endif
737
738 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
739 .family = AF_INET6,
740 .obj_size = sizeof(struct tcp6_request_sock),
741 .rtx_syn_ack = tcp_v6_rtx_synack,
742 .send_ack = tcp_v6_reqsk_send_ack,
743 .destructor = tcp_v6_reqsk_destructor,
744 .send_reset = tcp_v6_send_reset,
745 .syn_ack_timeout = tcp_syn_ack_timeout,
746 };
747
748 #ifdef CONFIG_TCP_MD5SIG
749 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
750 .md5_lookup = tcp_v6_reqsk_md5_lookup,
751 .calc_md5_hash = tcp_v6_md5_hash_skb,
752 };
753 #endif
754
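/* Fill in the TCP checksum over the IPv6 pseudo-header: leave a partial
 * checksum for hardware offload (CHECKSUM_PARTIAL) or compute it fully
 * in software.
 */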
755 static void __tcp_v6_send_check(struct sk_buff *skb,
756 const struct in6_addr *saddr, const struct in6_addr *daddr)
757 {
758 struct tcphdr *th = tcp_hdr(skb);
759
760 if (skb->ip_summed == CHECKSUM_PARTIAL) {
761 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
762 skb->csum_start = skb_transport_header(skb) - skb->head;
763 skb->csum_offset = offsetof(struct tcphdr, check);
764 } else {
765 th->check = tcp_v6_check(skb->len, saddr, daddr,
766 csum_partial(th, th->doff << 2,
767 skb->csum));
768 }
769 }
770
771 static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
772 {
773 struct ipv6_pinfo *np = inet6_sk(sk);
774
775 __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
776 }
777
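/* Prepare an outgoing GSO segment: clear the checksum field and set up a
 * partial checksum over the IPv6 pseudo-header.
 */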
778 static int tcp_v6_gso_send_check(struct sk_buff *skb)
779 {
780 const struct ipv6hdr *ipv6h;
781 struct tcphdr *th;
782
783 if (!pskb_may_pull(skb, sizeof(*th)))
784 return -EINVAL;
785
786 ipv6h = ipv6_hdr(skb);
787 th = tcp_hdr(skb);
788
789 th->check = 0;
790 skb->ip_summed = CHECKSUM_PARTIAL;
791 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
792 return 0;
793 }
794
795 static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
796 struct sk_buff *skb)
797 {
798 const struct ipv6hdr *iph = skb_gro_network_header(skb);
799
800 switch (skb->ip_summed) {
801 case CHECKSUM_COMPLETE:
802 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
803 skb->csum)) {
804 skb->ip_summed = CHECKSUM_UNNECESSARY;
805 break;
806 }
807
808 /* fall through */
809 case CHECKSUM_NONE:
810 NAPI_GRO_CB(skb)->flush = 1;
811 return NULL;
812 }
813
814 return tcp_gro_receive(head, skb);
815 }
816
817 static int tcp6_gro_complete(struct sk_buff *skb)
818 {
819 const struct ipv6hdr *iph = ipv6_hdr(skb);
820 struct tcphdr *th = tcp_hdr(skb);
821
822 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
823 &iph->saddr, &iph->daddr, 0);
824 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
825
826 return tcp_gro_complete(skb);
827 }
828
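/* Send a stand-alone TCP segment (RST or ACK, optionally carrying
 * timestamp and MD5 options) in reply to @skb, using the per-namespace
 * TCP control socket.
 */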
829 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
830 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
831 {
832 const struct tcphdr *th = tcp_hdr(skb);
833 struct tcphdr *t1;
834 struct sk_buff *buff;
835 struct flowi6 fl6;
836 struct net *net = dev_net(skb_dst(skb)->dev);
837 struct sock *ctl_sk = net->ipv6.tcp_sk;
838 unsigned int tot_len = sizeof(struct tcphdr);
839 struct dst_entry *dst;
840 __be32 *topt;
841
842 if (ts)
843 tot_len += TCPOLEN_TSTAMP_ALIGNED;
844 #ifdef CONFIG_TCP_MD5SIG
845 if (key)
846 tot_len += TCPOLEN_MD5SIG_ALIGNED;
847 #endif
848
849 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
850 GFP_ATOMIC);
851 if (buff == NULL)
852 return;
853
854 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
855
856 t1 = (struct tcphdr *) skb_push(buff, tot_len);
857 skb_reset_transport_header(buff);
858
859 /* Swap the send and the receive. */
860 memset(t1, 0, sizeof(*t1));
861 t1->dest = th->source;
862 t1->source = th->dest;
863 t1->doff = tot_len / 4;
864 t1->seq = htonl(seq);
865 t1->ack_seq = htonl(ack);
866 t1->ack = !rst || !th->ack;
867 t1->rst = rst;
868 t1->window = htons(win);
869
870 topt = (__be32 *)(t1 + 1);
871
872 if (ts) {
873 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
874 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
875 *topt++ = htonl(tcp_time_stamp);
876 *topt++ = htonl(ts);
877 }
878
879 #ifdef CONFIG_TCP_MD5SIG
880 if (key) {
881 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
882 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
883 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
884 &ipv6_hdr(skb)->saddr,
885 &ipv6_hdr(skb)->daddr, t1);
886 }
887 #endif
888
889 memset(&fl6, 0, sizeof(fl6));
890 fl6.daddr = ipv6_hdr(skb)->saddr;
891 fl6.saddr = ipv6_hdr(skb)->daddr;
892
893 buff->ip_summed = CHECKSUM_PARTIAL;
894 buff->csum = 0;
895
896 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
897
898 fl6.flowi6_proto = IPPROTO_TCP;
899 fl6.flowi6_oif = inet6_iif(skb);
900 fl6.fl6_dport = t1->dest;
901 fl6.fl6_sport = t1->source;
902 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
903
904 	/* Pass a socket to ip6_dst_lookup even when it is sent for a RST;
905 	 * the underlying function will use it to retrieve the network
906 	 * namespace.
907 */
908 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
909 if (!IS_ERR(dst)) {
910 skb_dst_set(buff, dst);
911 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
912 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
913 if (rst)
914 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
915 return;
916 }
917
918 kfree_skb(buff);
919 }
920
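/* Send a RST in reply to @skb unless the packet itself carries RST or is
 * not addressed to a unicast destination; with MD5 enabled and no socket
 * given, the key of a matching listener is used to sign the RST.
 */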
921 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
922 {
923 const struct tcphdr *th = tcp_hdr(skb);
924 u32 seq = 0, ack_seq = 0;
925 struct tcp_md5sig_key *key = NULL;
926 #ifdef CONFIG_TCP_MD5SIG
927 const __u8 *hash_location = NULL;
928 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
929 unsigned char newhash[16];
930 int genhash;
931 struct sock *sk1 = NULL;
932 #endif
933
934 if (th->rst)
935 return;
936
937 if (!ipv6_unicast_destination(skb))
938 return;
939
940 #ifdef CONFIG_TCP_MD5SIG
941 hash_location = tcp_parse_md5sig_option(th);
942 if (!sk && hash_location) {
943 /*
944 		 * active side is lost. Try to find the listening socket through the
945 		 * source port, and then find the md5 key through that listening socket.
946 		 * We do not lose security here:
947 		 * the incoming packet is checked with the md5 hash using the found key,
948 		 * and no RST is generated if the md5 hash doesn't match.
949 */
950 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
951 &tcp_hashinfo, &ipv6h->daddr,
952 ntohs(th->source), inet6_iif(skb));
953 if (!sk1)
954 return;
955
956 rcu_read_lock();
957 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
958 if (!key)
959 goto release_sk1;
960
961 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
962 if (genhash || memcmp(hash_location, newhash, 16) != 0)
963 goto release_sk1;
964 } else {
965 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
966 }
967 #endif
968
969 if (th->ack)
970 seq = ntohl(th->ack_seq);
971 else
972 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
973 (th->doff << 2);
974
975 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
976
977 #ifdef CONFIG_TCP_MD5SIG
978 release_sk1:
979 if (sk1) {
980 rcu_read_unlock();
981 sock_put(sk1);
982 }
983 #endif
984 }
985
986 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
987 struct tcp_md5sig_key *key, u8 tclass)
988 {
989 tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
990 }
991
992 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
993 {
994 struct inet_timewait_sock *tw = inet_twsk(sk);
995 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
996
997 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
998 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
999 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
1000 tw->tw_tclass);
1001
1002 inet_twsk_put(tw);
1003 }
1004
1005 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1006 struct request_sock *req)
1007 {
1008 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1009 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
1010 }
1011
1012
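/* For a segment arriving on a listening socket, look for a matching
 * request sock or an already established socket, falling back to SYN
 * cookie validation when enabled.
 */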
1013 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1014 {
1015 struct request_sock *req, **prev;
1016 const struct tcphdr *th = tcp_hdr(skb);
1017 struct sock *nsk;
1018
1019 /* Find possible connection requests. */
1020 req = inet6_csk_search_req(sk, &prev, th->source,
1021 &ipv6_hdr(skb)->saddr,
1022 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1023 if (req)
1024 return tcp_check_req(sk, skb, req, prev);
1025
1026 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1027 &ipv6_hdr(skb)->saddr, th->source,
1028 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1029
1030 if (nsk) {
1031 if (nsk->sk_state != TCP_TIME_WAIT) {
1032 bh_lock_sock(nsk);
1033 return nsk;
1034 }
1035 inet_twsk_put(inet_twsk(nsk));
1036 return NULL;
1037 }
1038
1039 #ifdef CONFIG_SYN_COOKIES
1040 if (!th->syn)
1041 sk = cookie_v6_check(sk, skb);
1042 #endif
1043 return sk;
1044 }
1045
1046 /* FIXME: this is substantially similar to the ipv4 code.
1047 * Can some kind of merge be done? -- erics
1048 */
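/* Handle an incoming SYN on a listening socket: allocate a request sock,
 * parse the options, pick an ISN (a SYN cookie under synflood pressure)
 * and answer with a SYN+ACK.
 */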
1049 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1050 {
1051 struct tcp_extend_values tmp_ext;
1052 struct tcp_options_received tmp_opt;
1053 const u8 *hash_location;
1054 struct request_sock *req;
1055 struct inet6_request_sock *treq;
1056 struct ipv6_pinfo *np = inet6_sk(sk);
1057 struct tcp_sock *tp = tcp_sk(sk);
1058 __u32 isn = TCP_SKB_CB(skb)->when;
1059 struct dst_entry *dst = NULL;
1060 bool want_cookie = false;
1061
1062 if (skb->protocol == htons(ETH_P_IP))
1063 return tcp_v4_conn_request(sk, skb);
1064
1065 if (!ipv6_unicast_destination(skb))
1066 goto drop;
1067
1068 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1069 want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1070 if (!want_cookie)
1071 goto drop;
1072 }
1073
1074 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1075 goto drop;
1076
1077 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1078 if (req == NULL)
1079 goto drop;
1080
1081 #ifdef CONFIG_TCP_MD5SIG
1082 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1083 #endif
1084
1085 tcp_clear_options(&tmp_opt);
1086 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1087 tmp_opt.user_mss = tp->rx_opt.user_mss;
1088 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1089
1090 if (tmp_opt.cookie_plus > 0 &&
1091 tmp_opt.saw_tstamp &&
1092 !tp->rx_opt.cookie_out_never &&
1093 (sysctl_tcp_cookie_size > 0 ||
1094 (tp->cookie_values != NULL &&
1095 tp->cookie_values->cookie_desired > 0))) {
1096 u8 *c;
1097 u32 *d;
1098 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1099 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1100
1101 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1102 goto drop_and_free;
1103
1104 /* Secret recipe starts with IP addresses */
1105 d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1106 *mess++ ^= *d++;
1107 *mess++ ^= *d++;
1108 *mess++ ^= *d++;
1109 *mess++ ^= *d++;
1110 d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1111 *mess++ ^= *d++;
1112 *mess++ ^= *d++;
1113 *mess++ ^= *d++;
1114 *mess++ ^= *d++;
1115
1116 /* plus variable length Initiator Cookie */
1117 c = (u8 *)mess;
1118 while (l-- > 0)
1119 *c++ ^= *hash_location++;
1120
1121 want_cookie = false; /* not our kind of cookie */
1122 tmp_ext.cookie_out_never = 0; /* false */
1123 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1124 } else if (!tp->rx_opt.cookie_in_always) {
1125 /* redundant indications, but ensure initialization. */
1126 tmp_ext.cookie_out_never = 1; /* true */
1127 tmp_ext.cookie_plus = 0;
1128 } else {
1129 goto drop_and_free;
1130 }
1131 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1132
1133 if (want_cookie && !tmp_opt.saw_tstamp)
1134 tcp_clear_options(&tmp_opt);
1135
1136 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1137 tcp_openreq_init(req, &tmp_opt, skb);
1138
1139 treq = inet6_rsk(req);
1140 treq->rmt_addr = ipv6_hdr(skb)->saddr;
1141 treq->loc_addr = ipv6_hdr(skb)->daddr;
1142 if (!want_cookie || tmp_opt.tstamp_ok)
1143 TCP_ECN_create_request(req, skb);
1144
1145 treq->iif = sk->sk_bound_dev_if;
1146
1147 /* So that link locals have meaning */
1148 if (!sk->sk_bound_dev_if &&
1149 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1150 treq->iif = inet6_iif(skb);
1151
1152 if (!isn) {
1153 struct inet_peer *peer = NULL;
1154
1155 if (ipv6_opt_accepted(sk, skb) ||
1156 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1157 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1158 atomic_inc(&skb->users);
1159 treq->pktopts = skb;
1160 }
1161
1162 if (want_cookie) {
1163 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1164 req->cookie_ts = tmp_opt.tstamp_ok;
1165 goto have_isn;
1166 }
1167
1168 /* VJ's idea. We save last timestamp seen
1169 * from the destination in peer table, when entering
1170 * state TIME-WAIT, and check against it before
1171 * accepting new connection request.
1172 *
1173 * If "isn" is not zero, this request hit alive
1174 * timewait bucket, so that all the necessary checks
1175 * are made in the function processing timewait state.
1176 */
1177 if (tmp_opt.saw_tstamp &&
1178 tcp_death_row.sysctl_tw_recycle &&
1179 (dst = inet6_csk_route_req(sk, req)) != NULL &&
1180 (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1181 ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1182 &treq->rmt_addr)) {
1183 inet_peer_refcheck(peer);
1184 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1185 (s32)(peer->tcp_ts - req->ts_recent) >
1186 TCP_PAWS_WINDOW) {
1187 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1188 goto drop_and_release;
1189 }
1190 }
1191 /* Kill the following clause, if you dislike this way. */
1192 else if (!sysctl_tcp_syncookies &&
1193 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1194 (sysctl_max_syn_backlog >> 2)) &&
1195 (!peer || !peer->tcp_ts_stamp) &&
1196 (!dst || !dst_metric(dst, RTAX_RTT))) {
1197 /* Without syncookies last quarter of
1198 * backlog is filled with destinations,
1199 * proven to be alive.
1200 * It means that we continue to communicate
1201 * to destinations, already remembered
1202 * to the moment of synflood.
1203 */
1204 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1205 &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1206 goto drop_and_release;
1207 }
1208
1209 isn = tcp_v6_init_sequence(skb);
1210 }
1211 have_isn:
1212 tcp_rsk(req)->snt_isn = isn;
1213 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1214
1215 security_inet_conn_request(sk, skb, req);
1216
1217 if (tcp_v6_send_synack(sk, req,
1218 (struct request_values *)&tmp_ext,
1219 skb_get_queue_mapping(skb)) ||
1220 want_cookie)
1221 goto drop_and_free;
1222
1223 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1224 return 0;
1225
1226 drop_and_release:
1227 dst_release(dst);
1228 drop_and_free:
1229 reqsk_free(req);
1230 drop:
1231 return 0; /* don't send reset */
1232 }
1233
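/* Create the child socket once the handshake completes; the v6-mapped
 * case is delegated to tcp_v4_syn_recv_sock() and then switched to the
 * mapped address-family operations.
 */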
1234 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1235 struct request_sock *req,
1236 struct dst_entry *dst)
1237 {
1238 struct inet6_request_sock *treq;
1239 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1240 struct tcp6_sock *newtcp6sk;
1241 struct inet_sock *newinet;
1242 struct tcp_sock *newtp;
1243 struct sock *newsk;
1244 struct ipv6_txoptions *opt;
1245 #ifdef CONFIG_TCP_MD5SIG
1246 struct tcp_md5sig_key *key;
1247 #endif
1248
1249 if (skb->protocol == htons(ETH_P_IP)) {
1250 /*
1251 * v6 mapped
1252 */
1253
1254 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1255
1256 if (newsk == NULL)
1257 return NULL;
1258
1259 newtcp6sk = (struct tcp6_sock *)newsk;
1260 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1261
1262 newinet = inet_sk(newsk);
1263 newnp = inet6_sk(newsk);
1264 newtp = tcp_sk(newsk);
1265
1266 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1267
1268 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1269
1270 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1271
1272 newnp->rcv_saddr = newnp->saddr;
1273
1274 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1275 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1276 #ifdef CONFIG_TCP_MD5SIG
1277 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1278 #endif
1279
1280 newnp->ipv6_ac_list = NULL;
1281 newnp->ipv6_fl_list = NULL;
1282 newnp->pktoptions = NULL;
1283 newnp->opt = NULL;
1284 newnp->mcast_oif = inet6_iif(skb);
1285 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1286 newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1287
1288 /*
1289 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1290 * here, tcp_create_openreq_child now does this for us, see the comment in
1291 * that function for the gory details. -acme
1292 */
1293
1294 		/* It is a tricky place. Until this moment IPv4 tcp
1295 worked with IPv6 icsk.icsk_af_ops.
1296 Sync it now.
1297 */
1298 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1299
1300 return newsk;
1301 }
1302
1303 treq = inet6_rsk(req);
1304 opt = np->opt;
1305
1306 if (sk_acceptq_is_full(sk))
1307 goto out_overflow;
1308
1309 if (!dst) {
1310 dst = inet6_csk_route_req(sk, req);
1311 if (!dst)
1312 goto out;
1313 }
1314
1315 newsk = tcp_create_openreq_child(sk, req, skb);
1316 if (newsk == NULL)
1317 goto out_nonewsk;
1318
1319 /*
1320 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1321 * count here, tcp_create_openreq_child now does this for us, see the
1322 * comment in that function for the gory details. -acme
1323 */
1324
1325 newsk->sk_gso_type = SKB_GSO_TCPV6;
1326 __ip6_dst_store(newsk, dst, NULL, NULL);
1327
1328 newtcp6sk = (struct tcp6_sock *)newsk;
1329 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1330
1331 newtp = tcp_sk(newsk);
1332 newinet = inet_sk(newsk);
1333 newnp = inet6_sk(newsk);
1334
1335 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1336
1337 newnp->daddr = treq->rmt_addr;
1338 newnp->saddr = treq->loc_addr;
1339 newnp->rcv_saddr = treq->loc_addr;
1340 newsk->sk_bound_dev_if = treq->iif;
1341
1342 /* Now IPv6 options...
1343
1344 First: no IPv4 options.
1345 */
1346 newinet->inet_opt = NULL;
1347 newnp->ipv6_ac_list = NULL;
1348 newnp->ipv6_fl_list = NULL;
1349
1350 /* Clone RX bits */
1351 newnp->rxopt.all = np->rxopt.all;
1352
1353 /* Clone pktoptions received with SYN */
1354 newnp->pktoptions = NULL;
1355 if (treq->pktopts != NULL) {
1356 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1357 consume_skb(treq->pktopts);
1358 treq->pktopts = NULL;
1359 if (newnp->pktoptions)
1360 skb_set_owner_r(newnp->pktoptions, newsk);
1361 }
1362 newnp->opt = NULL;
1363 newnp->mcast_oif = inet6_iif(skb);
1364 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1365 newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1366
1367 /* Clone native IPv6 options from listening socket (if any)
1368
1369 Yes, keeping reference count would be much more clever,
1370 	   but we do one more thing here: reattach optmem
1371 to newsk.
1372 */
1373 if (opt) {
1374 newnp->opt = ipv6_dup_options(newsk, opt);
1375 if (opt != np->opt)
1376 sock_kfree_s(sk, opt, opt->tot_len);
1377 }
1378
1379 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1380 if (newnp->opt)
1381 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1382 newnp->opt->opt_flen);
1383
1384 tcp_mtup_init(newsk);
1385 tcp_sync_mss(newsk, dst_mtu(dst));
1386 newtp->advmss = dst_metric_advmss(dst);
1387 if (tcp_sk(sk)->rx_opt.user_mss &&
1388 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1389 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1390
1391 tcp_initialize_rcv_mss(newsk);
1392 if (tcp_rsk(req)->snt_synack)
1393 tcp_valid_rtt_meas(newsk,
1394 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1395 newtp->total_retrans = req->retrans;
1396
1397 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1398 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1399
1400 #ifdef CONFIG_TCP_MD5SIG
1401 /* Copy over the MD5 key from the original socket */
1402 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1403 /* We're using one, so create a matching key
1404 * on the newsk structure. If we fail to get
1405 * memory, then we end up not copying the key
1406 * across. Shucks.
1407 */
1408 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
1409 AF_INET6, key->key, key->keylen, GFP_ATOMIC);
1410 }
1411 #endif
1412
1413 if (__inet_inherit_port(sk, newsk) < 0) {
1414 sock_put(newsk);
1415 goto out;
1416 }
1417 __inet6_hash(newsk, NULL);
1418
1419 return newsk;
1420
1421 out_overflow:
1422 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1423 out_nonewsk:
1424 if (opt && opt != np->opt)
1425 sock_kfree_s(sk, opt, opt->tot_len);
1426 dst_release(dst);
1427 out:
1428 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1429 return NULL;
1430 }
1431
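/* Validate the checksum of an incoming segment: trust CHECKSUM_COMPLETE,
 * verify short packets immediately and leave longer ones for later
 * checksum completion.
 */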
1432 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1433 {
1434 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1435 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1436 &ipv6_hdr(skb)->daddr, skb->csum)) {
1437 skb->ip_summed = CHECKSUM_UNNECESSARY;
1438 return 0;
1439 }
1440 }
1441
1442 skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1443 &ipv6_hdr(skb)->saddr,
1444 &ipv6_hdr(skb)->daddr, 0));
1445
1446 if (skb->len <= 76) {
1447 return __skb_checksum_complete(skb);
1448 }
1449 return 0;
1450 }
1451
1452 /* The socket must have its spinlock held when we get
1453 * here.
1454 *
1455 * We have a potential double-lock case here, so even when
1456 * doing backlog processing we use the BH locking scheme.
1457 * This is because we cannot sleep with the original spinlock
1458 * held.
1459 */
1460 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1461 {
1462 struct ipv6_pinfo *np = inet6_sk(sk);
1463 struct tcp_sock *tp;
1464 struct sk_buff *opt_skb = NULL;
1465
1466 /* Imagine: socket is IPv6. IPv4 packet arrives,
1467 goes to IPv4 receive handler and backlogged.
1468 From backlog it always goes here. Kerboom...
1469 Fortunately, tcp_rcv_established and rcv_established
1470 	   handle them correctly, but that is not the case with
1471 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1472 */
1473
1474 if (skb->protocol == htons(ETH_P_IP))
1475 return tcp_v4_do_rcv(sk, skb);
1476
1477 #ifdef CONFIG_TCP_MD5SIG
1478 	if (tcp_v6_inbound_md5_hash(sk, skb))
1479 goto discard;
1480 #endif
1481
1482 if (sk_filter(sk, skb))
1483 goto discard;
1484
1485 /*
1486 * socket locking is here for SMP purposes as backlog rcv
1487 * is currently called with bh processing disabled.
1488 */
1489
1490 /* Do Stevens' IPV6_PKTOPTIONS.
1491
1492 Yes, guys, it is the only place in our code, where we
1493 may make it not affecting IPv4.
1494 The rest of code is protocol independent,
1495 and I do not like idea to uglify IPv4.
1496
1497 Actually, all the idea behind IPV6_PKTOPTIONS
1498 	   looks not very well thought out. For now we latch
1499 options, received in the last packet, enqueued
1500 by tcp. Feel free to propose better solution.
1501 --ANK (980728)
1502 */
1503 if (np->rxopt.all)
1504 opt_skb = skb_clone(skb, GFP_ATOMIC);
1505
1506 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1507 sock_rps_save_rxhash(sk, skb);
1508 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1509 goto reset;
1510 if (opt_skb)
1511 goto ipv6_pktoptions;
1512 return 0;
1513 }
1514
1515 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1516 goto csum_err;
1517
1518 if (sk->sk_state == TCP_LISTEN) {
1519 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1520 if (!nsk)
1521 goto discard;
1522
1523 /*
1524 * Queue it on the new socket if the new socket is active,
1525 * otherwise we just shortcircuit this and continue with
1526 * the new socket..
1527 */
1528 		if (nsk != sk) {
1529 sock_rps_save_rxhash(nsk, skb);
1530 if (tcp_child_process(sk, nsk, skb))
1531 goto reset;
1532 if (opt_skb)
1533 __kfree_skb(opt_skb);
1534 return 0;
1535 }
1536 } else
1537 sock_rps_save_rxhash(sk, skb);
1538
1539 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1540 goto reset;
1541 if (opt_skb)
1542 goto ipv6_pktoptions;
1543 return 0;
1544
1545 reset:
1546 tcp_v6_send_reset(sk, skb);
1547 discard:
1548 if (opt_skb)
1549 __kfree_skb(opt_skb);
1550 kfree_skb(skb);
1551 return 0;
1552 csum_err:
1553 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1554 goto discard;
1555
1556
1557 ipv6_pktoptions:
1558 /* Do you ask, what is it?
1559
1560 1. skb was enqueued by tcp.
1561 2. skb is added to tail of read queue, rather than out of order.
1562 3. socket is not in passive state.
1563 4. Finally, it really contains options, which user wants to receive.
1564 */
1565 tp = tcp_sk(sk);
1566 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1567 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1568 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1569 np->mcast_oif = inet6_iif(opt_skb);
1570 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1571 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1572 if (np->rxopt.bits.rxtclass)
1573 np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
1574 if (ipv6_opt_accepted(sk, opt_skb)) {
1575 skb_set_owner_r(opt_skb, sk);
1576 opt_skb = xchg(&np->pktoptions, opt_skb);
1577 } else {
1578 __kfree_skb(opt_skb);
1579 opt_skb = xchg(&np->pktoptions, NULL);
1580 }
1581 }
1582
1583 kfree_skb(opt_skb);
1584 return 0;
1585 }
1586
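/* Receive path entry point for TCP over IPv6: validate the header, look
 * up the socket and either process the segment directly, prequeue it, or
 * queue it on the socket backlog.
 */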
1587 static int tcp_v6_rcv(struct sk_buff *skb)
1588 {
1589 const struct tcphdr *th;
1590 const struct ipv6hdr *hdr;
1591 struct sock *sk;
1592 int ret;
1593 struct net *net = dev_net(skb->dev);
1594
1595 if (skb->pkt_type != PACKET_HOST)
1596 goto discard_it;
1597
1598 /*
1599 * Count it even if it's bad.
1600 */
1601 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1602
1603 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1604 goto discard_it;
1605
1606 th = tcp_hdr(skb);
1607
1608 if (th->doff < sizeof(struct tcphdr)/4)
1609 goto bad_packet;
1610 if (!pskb_may_pull(skb, th->doff*4))
1611 goto discard_it;
1612
1613 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1614 goto bad_packet;
1615
1616 th = tcp_hdr(skb);
1617 hdr = ipv6_hdr(skb);
1618 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1619 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1620 skb->len - th->doff*4);
1621 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1622 TCP_SKB_CB(skb)->when = 0;
1623 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1624 TCP_SKB_CB(skb)->sacked = 0;
1625
1626 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1627 if (!sk)
1628 goto no_tcp_socket;
1629
1630 process:
1631 if (sk->sk_state == TCP_TIME_WAIT)
1632 goto do_time_wait;
1633
1634 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1635 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1636 goto discard_and_relse;
1637 }
1638
1639 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1640 goto discard_and_relse;
1641
1642 if (sk_filter(sk, skb))
1643 goto discard_and_relse;
1644
1645 skb->dev = NULL;
1646
1647 bh_lock_sock_nested(sk);
1648 ret = 0;
1649 if (!sock_owned_by_user(sk)) {
1650 #ifdef CONFIG_NET_DMA
1651 struct tcp_sock *tp = tcp_sk(sk);
1652 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1653 tp->ucopy.dma_chan = net_dma_find_channel();
1654 if (tp->ucopy.dma_chan)
1655 ret = tcp_v6_do_rcv(sk, skb);
1656 else
1657 #endif
1658 {
1659 if (!tcp_prequeue(sk, skb))
1660 ret = tcp_v6_do_rcv(sk, skb);
1661 }
1662 } else if (unlikely(sk_add_backlog(sk, skb,
1663 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1664 bh_unlock_sock(sk);
1665 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1666 goto discard_and_relse;
1667 }
1668 bh_unlock_sock(sk);
1669
1670 sock_put(sk);
1671 return ret ? -1 : 0;
1672
1673 no_tcp_socket:
1674 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1675 goto discard_it;
1676
1677 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1678 bad_packet:
1679 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1680 } else {
1681 tcp_v6_send_reset(NULL, skb);
1682 }
1683
1684 discard_it:
1685
1686 /*
1687 * Discard frame
1688 */
1689
1690 kfree_skb(skb);
1691 return 0;
1692
1693 discard_and_relse:
1694 sock_put(sk);
1695 goto discard_it;
1696
1697 do_time_wait:
1698 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1699 inet_twsk_put(inet_twsk(sk));
1700 goto discard_it;
1701 }
1702
1703 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1704 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1705 inet_twsk_put(inet_twsk(sk));
1706 goto discard_it;
1707 }
1708
1709 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1710 case TCP_TW_SYN:
1711 {
1712 struct sock *sk2;
1713
1714 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1715 &ipv6_hdr(skb)->daddr,
1716 ntohs(th->dest), inet6_iif(skb));
1717 if (sk2 != NULL) {
1718 struct inet_timewait_sock *tw = inet_twsk(sk);
1719 inet_twsk_deschedule(tw, &tcp_death_row);
1720 inet_twsk_put(tw);
1721 sk = sk2;
1722 goto process;
1723 }
1724 /* Fall through to ACK */
1725 }
1726 case TCP_TW_ACK:
1727 tcp_v6_timewait_ack(sk, skb);
1728 break;
1729 case TCP_TW_RST:
1730 goto no_tcp_socket;
1731 case TCP_TW_SUCCESS:;
1732 }
1733 goto discard_it;
1734 }
1735
1736 static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1737 {
1738 struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1739 struct ipv6_pinfo *np = inet6_sk(sk);
1740 struct inet_peer *peer;
1741
1742 if (!rt ||
1743 !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1744 peer = inet_getpeer_v6(&np->daddr, 1);
1745 *release_it = true;
1746 } else {
1747 if (!rt->rt6i_peer)
1748 rt6_bind_peer(rt, 1);
1749 peer = rt->rt6i_peer;
1750 *release_it = false;
1751 }
1752
1753 return peer;
1754 }
1755
1756 static void *tcp_v6_tw_get_peer(struct sock *sk)
1757 {
1758 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1759 const struct inet_timewait_sock *tw = inet_twsk(sk);
1760
1761 if (tw->tw_family == AF_INET)
1762 return tcp_v4_tw_get_peer(sk);
1763
1764 return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1765 }
1766
1767 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1768 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1769 .twsk_unique = tcp_twsk_unique,
1770 .twsk_destructor= tcp_twsk_destructor,
1771 .twsk_getpeer = tcp_v6_tw_get_peer,
1772 };
1773
1774 static const struct inet_connection_sock_af_ops ipv6_specific = {
1775 .queue_xmit = inet6_csk_xmit,
1776 .send_check = tcp_v6_send_check,
1777 .rebuild_header = inet6_sk_rebuild_header,
1778 .conn_request = tcp_v6_conn_request,
1779 .syn_recv_sock = tcp_v6_syn_recv_sock,
1780 .get_peer = tcp_v6_get_peer,
1781 .net_header_len = sizeof(struct ipv6hdr),
1782 .net_frag_header_len = sizeof(struct frag_hdr),
1783 .setsockopt = ipv6_setsockopt,
1784 .getsockopt = ipv6_getsockopt,
1785 .addr2sockaddr = inet6_csk_addr2sockaddr,
1786 .sockaddr_len = sizeof(struct sockaddr_in6),
1787 .bind_conflict = inet6_csk_bind_conflict,
1788 #ifdef CONFIG_COMPAT
1789 .compat_setsockopt = compat_ipv6_setsockopt,
1790 .compat_getsockopt = compat_ipv6_getsockopt,
1791 #endif
1792 };
1793
1794 #ifdef CONFIG_TCP_MD5SIG
1795 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1796 .md5_lookup = tcp_v6_md5_lookup,
1797 .calc_md5_hash = tcp_v6_md5_hash_skb,
1798 .md5_parse = tcp_v6_parse_md5_keys,
1799 };
1800 #endif
1801
1802 /*
1803 * TCP over IPv4 via INET6 API
1804 */
1805
1806 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1807 .queue_xmit = ip_queue_xmit,
1808 .send_check = tcp_v4_send_check,
1809 .rebuild_header = inet_sk_rebuild_header,
1810 .conn_request = tcp_v6_conn_request,
1811 .syn_recv_sock = tcp_v6_syn_recv_sock,
1812 .get_peer = tcp_v4_get_peer,
1813 .net_header_len = sizeof(struct iphdr),
1814 .setsockopt = ipv6_setsockopt,
1815 .getsockopt = ipv6_getsockopt,
1816 .addr2sockaddr = inet6_csk_addr2sockaddr,
1817 .sockaddr_len = sizeof(struct sockaddr_in6),
1818 .bind_conflict = inet6_csk_bind_conflict,
1819 #ifdef CONFIG_COMPAT
1820 .compat_setsockopt = compat_ipv6_setsockopt,
1821 .compat_getsockopt = compat_ipv6_getsockopt,
1822 #endif
1823 };
1824
1825 #ifdef CONFIG_TCP_MD5SIG
1826 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1827 .md5_lookup = tcp_v4_md5_lookup,
1828 .calc_md5_hash = tcp_v4_md5_hash_skb,
1829 .md5_parse = tcp_v6_parse_md5_keys,
1830 };
1831 #endif
1832
1833 /* NOTE: A lot of things set to zero explicitly by call to
1834 * sk_alloc() so need not be done here.
1835 */
1836 static int tcp_v6_init_sock(struct sock *sk)
1837 {
1838 struct inet_connection_sock *icsk = inet_csk(sk);
1839
1840 tcp_init_sock(sk);
1841
1842 icsk->icsk_af_ops = &ipv6_specific;
1843
1844 #ifdef CONFIG_TCP_MD5SIG
1845 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1846 #endif
1847
1848 return 0;
1849 }
1850
1851 static void tcp_v6_destroy_sock(struct sock *sk)
1852 {
1853 tcp_v4_destroy_sock(sk);
1854 inet6_destroy_sock(sk);
1855 }
1856
1857 #ifdef CONFIG_PROC_FS
1858 /* Proc filesystem TCPv6 sock list dumping. */
1859 static void get_openreq6(struct seq_file *seq,
1860 const struct sock *sk, struct request_sock *req, int i, int uid)
1861 {
1862 int ttd = req->expires - jiffies;
1863 const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1864 const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1865
1866 if (ttd < 0)
1867 ttd = 0;
1868
1869 seq_printf(seq,
1870 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1871 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1872 i,
1873 src->s6_addr32[0], src->s6_addr32[1],
1874 src->s6_addr32[2], src->s6_addr32[3],
1875 ntohs(inet_rsk(req)->loc_port),
1876 dest->s6_addr32[0], dest->s6_addr32[1],
1877 dest->s6_addr32[2], dest->s6_addr32[3],
1878 ntohs(inet_rsk(req)->rmt_port),
1879 TCP_SYN_RECV,
1880 0,0, /* could print option size, but that is af dependent. */
1881 1, /* timers active (only the expire timer) */
1882 jiffies_to_clock_t(ttd),
1883 req->retrans,
1884 uid,
1885 0, /* non standard timer */
1886 0, /* open_requests have no inode */
1887 0, req);
1888 }
1889
1890 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1891 {
1892 const struct in6_addr *dest, *src;
1893 __u16 destp, srcp;
1894 int timer_active;
1895 unsigned long timer_expires;
1896 const struct inet_sock *inet = inet_sk(sp);
1897 const struct tcp_sock *tp = tcp_sk(sp);
1898 const struct inet_connection_sock *icsk = inet_csk(sp);
1899 const struct ipv6_pinfo *np = inet6_sk(sp);
1900
1901 dest = &np->daddr;
1902 src = &np->rcv_saddr;
1903 destp = ntohs(inet->inet_dport);
1904 srcp = ntohs(inet->inet_sport);
1905
1906 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1907 timer_active = 1;
1908 timer_expires = icsk->icsk_timeout;
1909 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1910 timer_active = 4;
1911 timer_expires = icsk->icsk_timeout;
1912 } else if (timer_pending(&sp->sk_timer)) {
1913 timer_active = 2;
1914 timer_expires = sp->sk_timer.expires;
1915 } else {
1916 timer_active = 0;
1917 timer_expires = jiffies;
1918 }
1919
1920 seq_printf(seq,
1921 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1922 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
1923 i,
1924 src->s6_addr32[0], src->s6_addr32[1],
1925 src->s6_addr32[2], src->s6_addr32[3], srcp,
1926 dest->s6_addr32[0], dest->s6_addr32[1],
1927 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1928 sp->sk_state,
1929 tp->write_seq-tp->snd_una,
1930 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1931 timer_active,
1932 jiffies_to_clock_t(timer_expires - jiffies),
1933 icsk->icsk_retransmits,
1934 sock_i_uid(sp),
1935 icsk->icsk_probes_out,
1936 sock_i_ino(sp),
1937 atomic_read(&sp->sk_refcnt), sp,
1938 jiffies_to_clock_t(icsk->icsk_rto),
1939 jiffies_to_clock_t(icsk->icsk_ack.ato),
1940 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
1941 tp->snd_cwnd,
1942 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
1943 );
1944 }
1945
1946 static void get_timewait6_sock(struct seq_file *seq,
1947 struct inet_timewait_sock *tw, int i)
1948 {
1949 const struct in6_addr *dest, *src;
1950 __u16 destp, srcp;
1951 const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
1952 int ttd = tw->tw_ttd - jiffies;
1953
1954 if (ttd < 0)
1955 ttd = 0;
1956
1957 dest = &tw6->tw_v6_daddr;
1958 src = &tw6->tw_v6_rcv_saddr;
1959 destp = ntohs(tw->tw_dport);
1960 srcp = ntohs(tw->tw_sport);
1961
1962 seq_printf(seq,
1963 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1964 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1965 i,
1966 src->s6_addr32[0], src->s6_addr32[1],
1967 src->s6_addr32[2], src->s6_addr32[3], srcp,
1968 dest->s6_addr32[0], dest->s6_addr32[1],
1969 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1970 tw->tw_substate, 0, 0,
1971 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
1972 atomic_read(&tw->tw_refcnt), tw);
1973 }
1974
1975 static int tcp6_seq_show(struct seq_file *seq, void *v)
1976 {
1977 struct tcp_iter_state *st;
1978
1979 if (v == SEQ_START_TOKEN) {
1980 seq_puts(seq,
1981 " sl "
1982 "local_address "
1983 "remote_address "
1984 "st tx_queue rx_queue tr tm->when retrnsmt"
1985 " uid timeout inode\n");
1986 goto out;
1987 }
1988 st = seq->private;
1989
1990 switch (st->state) {
1991 case TCP_SEQ_STATE_LISTENING:
1992 case TCP_SEQ_STATE_ESTABLISHED:
1993 get_tcp6_sock(seq, v, st->num);
1994 break;
1995 case TCP_SEQ_STATE_OPENREQ:
1996 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1997 break;
1998 case TCP_SEQ_STATE_TIME_WAIT:
1999 get_timewait6_sock(seq, v, st->num);
2000 break;
2001 }
2002 out:
2003 return 0;
2004 }
2005
2006 static const struct file_operations tcp6_afinfo_seq_fops = {
2007 .owner = THIS_MODULE,
2008 .open = tcp_seq_open,
2009 .read = seq_read,
2010 .llseek = seq_lseek,
2011 .release = seq_release_net
2012 };
2013
2014 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2015 .name = "tcp6",
2016 .family = AF_INET6,
2017 .seq_fops = &tcp6_afinfo_seq_fops,
2018 .seq_ops = {
2019 .show = tcp6_seq_show,
2020 },
2021 };
2022
2023 int __net_init tcp6_proc_init(struct net *net)
2024 {
2025 return tcp_proc_register(net, &tcp6_seq_afinfo);
2026 }
2027
2028 void tcp6_proc_exit(struct net *net)
2029 {
2030 tcp_proc_unregister(net, &tcp6_seq_afinfo);
2031 }
2032 #endif
2033
2034 struct proto tcpv6_prot = {
2035 .name = "TCPv6",
2036 .owner = THIS_MODULE,
2037 .close = tcp_close,
2038 .connect = tcp_v6_connect,
2039 .disconnect = tcp_disconnect,
2040 .accept = inet_csk_accept,
2041 .ioctl = tcp_ioctl,
2042 .init = tcp_v6_init_sock,
2043 .destroy = tcp_v6_destroy_sock,
2044 .shutdown = tcp_shutdown,
2045 .setsockopt = tcp_setsockopt,
2046 .getsockopt = tcp_getsockopt,
2047 .recvmsg = tcp_recvmsg,
2048 .sendmsg = tcp_sendmsg,
2049 .sendpage = tcp_sendpage,
2050 .backlog_rcv = tcp_v6_do_rcv,
2051 .hash = tcp_v6_hash,
2052 .unhash = inet_unhash,
2053 .get_port = inet_csk_get_port,
2054 .enter_memory_pressure = tcp_enter_memory_pressure,
2055 .sockets_allocated = &tcp_sockets_allocated,
2056 .memory_allocated = &tcp_memory_allocated,
2057 .memory_pressure = &tcp_memory_pressure,
2058 .orphan_count = &tcp_orphan_count,
2059 .sysctl_wmem = sysctl_tcp_wmem,
2060 .sysctl_rmem = sysctl_tcp_rmem,
2061 .max_header = MAX_TCP_HEADER,
2062 .obj_size = sizeof(struct tcp6_sock),
2063 .slab_flags = SLAB_DESTROY_BY_RCU,
2064 .twsk_prot = &tcp6_timewait_sock_ops,
2065 .rsk_prot = &tcp6_request_sock_ops,
2066 .h.hashinfo = &tcp_hashinfo,
2067 .no_autobind = true,
2068 #ifdef CONFIG_COMPAT
2069 .compat_setsockopt = compat_tcp_setsockopt,
2070 .compat_getsockopt = compat_tcp_getsockopt,
2071 #endif
2072 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
2073 .proto_cgroup = tcp_proto_cgroup,
2074 #endif
2075 };
2076
2077 static const struct inet6_protocol tcpv6_protocol = {
2078 .handler = tcp_v6_rcv,
2079 .err_handler = tcp_v6_err,
2080 .gso_send_check = tcp_v6_gso_send_check,
2081 .gso_segment = tcp_tso_segment,
2082 .gro_receive = tcp6_gro_receive,
2083 .gro_complete = tcp6_gro_complete,
2084 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2085 };
2086
2087 static struct inet_protosw tcpv6_protosw = {
2088 .type = SOCK_STREAM,
2089 .protocol = IPPROTO_TCP,
2090 .prot = &tcpv6_prot,
2091 .ops = &inet6_stream_ops,
2092 .no_check = 0,
2093 .flags = INET_PROTOSW_PERMANENT |
2094 INET_PROTOSW_ICSK,
2095 };
2096
2097 static int __net_init tcpv6_net_init(struct net *net)
2098 {
2099 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2100 SOCK_RAW, IPPROTO_TCP, net);
2101 }
2102
2103 static void __net_exit tcpv6_net_exit(struct net *net)
2104 {
2105 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2106 }
2107
2108 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2109 {
2110 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2111 }
2112
2113 static struct pernet_operations tcpv6_net_ops = {
2114 .init = tcpv6_net_init,
2115 .exit = tcpv6_net_exit,
2116 .exit_batch = tcpv6_net_exit_batch,
2117 };
2118
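/* Register the IPv6 TCP protocol handler, the SOCK_STREAM protosw entry
 * and the per-namespace control socket at module init.
 */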
2119 int __init tcpv6_init(void)
2120 {
2121 int ret;
2122
2123 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2124 if (ret)
2125 goto out;
2126
2127 /* register inet6 protocol */
2128 ret = inet6_register_protosw(&tcpv6_protosw);
2129 if (ret)
2130 goto out_tcpv6_protocol;
2131
2132 ret = register_pernet_subsys(&tcpv6_net_ops);
2133 if (ret)
2134 goto out_tcpv6_protosw;
2135 out:
2136 return ret;
2137
2138 out_tcpv6_protocol:
2139 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2140 out_tcpv6_protosw:
2141 inet6_unregister_protosw(&tcpv6_protosw);
2142 goto out;
2143 }
2144
2145 void tcpv6_exit(void)
2146 {
2147 unregister_pernet_subsys(&tcpv6_net_ops);
2148 inet6_unregister_protosw(&tcpv6_protosw);
2149 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2150 }