/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Socket used for sending RSTs and ACKs */
static struct socket *tcp6_socket;

static void	tcp_v6_send_reset(struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void	tcp_v6_send_check(struct sock *sk, int len,
				  struct sk_buff *skb);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;

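/*
 * Local port selection is shared with IPv4: only the bind-conflict
 * predicate differs, so we delegate to inet_csk_get_port() with the
 * IPv6-aware inet6_csk_bind_conflict() callback.  Likewise, a socket
 * that has flipped over to v4-mapped operation hashes via tcp_prot.
 */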
static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(&tcp_hashinfo, sk);
		local_bh_enable();
	}
}

static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   unsigned long base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

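/*
 * Initial sequence numbers are derived from the connection 4-tuple via
 * the secure_tcp*_sequence_number() helpers so that they are hard for
 * an off-path attacker to predict.  An IPv4 frame can legitimately
 * reach a v4-mapped IPv6 socket, hence the protocol check below.
 */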
static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IPV6)) {
		return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
						    skb->nh.ipv6h->saddr.s6_addr32,
						    skb->h.th->dest,
						    skb->h.th->source);
	} else {
		return secure_tcp_sequence_number(skb->nh.iph->daddr,
						  skb->nh.iph->saddr,
						  skb->h.th->dest,
						  skb->h.th->source);
	}
}

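/*
 * Active open.  In outline:
 *
 *   1. validate the destination (flow label, scope id, address type);
 *   2. if the destination is v4-mapped, flip the socket over to the
 *      IPv4 af_ops and let tcp_v4_connect() do the real work;
 *   3. otherwise route via ip6_dst_lookup()/xfrm_lookup(), pick a
 *      source address, hash the socket and send the SYN.
 */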
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 * TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto failure;

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

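/*
 * ICMPv6 error handler.  PKT_TOOBIG updates the cached path MTU and
 * triggers tcp_sync_mss()/tcp_simple_retransmit(); other errors are
 * converted with icmpv6_err_convert() and either reported to the
 * socket or used to drop an embryonic request_sock on a listener.
 */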
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       int type, int code, int offset, __u32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;

	sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
			  th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

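/*
 * Transmit a SYN|ACK for a pending request.  If the caller did not
 * supply a route we build one here, honouring any source routing
 * header the listener has configured (or, when routing-header
 * reception is enabled, one inverted from the client's SYN via
 * ipv6_invert_rthdr()).
 */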
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;

	if (dst == NULL) {
		opt = np->opt;
		if (opt == NULL &&
		    np->rxopt.bits.osrcrt == 2 &&
		    treq->pktopts) {
			struct sk_buff *pktopts = treq->pktopts;
			struct inet6_skb_parm *rxopt = IP6CB(pktopts);
			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(pktopts->nh.raw + rxopt->srcrt));
		}

		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;
		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);
		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto done;
	}

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		if (err == NET_XMIT_CN)
			err = 0;
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts)
		kfree_skb(inet6_rsk(req)->pktopts);
}

static struct request_sock_ops tcp6_request_sock_ops = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	= tcp_v6_send_synack,
	.send_ack	= tcp_v6_reqsk_send_ack,
	.destructor	= tcp_v6_reqsk_destructor,
	.send_reset	= tcp_v6_send_reset
};

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
};

static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = skb->h.th;

	if (skb->ip_summed == CHECKSUM_HW) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff<<2,
							 skb->csum));
	}
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = skb->nh.ipv6h;
	th = skb->h.th;

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_HW;
	return 0;
}

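/*
 * Build and send a RST in response to @skb.  Per RFC 793, if the
 * offending segment carried an ACK the RST takes its sequence number
 * from that ACK:
 *
 *	<SEQ=SEG.ACK><CTL=RST>
 *
 * otherwise the RST acknowledges everything the segment occupied:
 *
 *	<SEQ=0><ACK=SEG.SEQ+SEG.LEN><CTL=RST,ACK>
 *
 * RSTs are never sent in response to RSTs, nor to non-unicast
 * destinations.
 */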
static void tcp_v6_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

	t1 = (struct tcphdr *) skb_push(buff, sizeof(struct tcphdr));

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = sizeof(*t1)/4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff<<2));
	}

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {

		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}

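/*
 * Send a bare ACK (used for TIME_WAIT and request_sock ACKs).  The
 * segment is built by hand on a fresh skb; when @ts is nonzero a
 * timestamp option is appended, padded with NOPs to a multiple of
 * four bytes:
 *
 *	NOP NOP TIMESTAMP(kind=8, len=10) tsval tsecr
 */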
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(struct tcphdr);

	if (ts)
		tot_len += 3*4;

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len/4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	if (ts) {
		u32 *ptr = (u32*)(t1 + 1);
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tcp_time_stamp);
		*ptr = htonl(ts);
	}

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;

	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1,
			req->rcv_wnd, req->ts_recent);
}

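/*
 * A segment arriving for a listening socket may belong to one of three
 * things, checked in order: a pending request_sock (SYN already seen),
 * an established or TIME_WAIT socket that hashed after the lookup in
 * tcp_v6_rcv(), or the listener itself.  Returns NULL to drop.
 */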
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = skb->h.th;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &skb->nh.ipv6h->saddr,
				   &skb->nh.ipv6h->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
					 th->source, &skb->nh.ipv6h->daddr,
					 ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

#if 0 /*def CONFIG_SYN_COOKIES*/
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
	TCP_ECN_create_request(req, skb->h.th);
	treq->pktopts = NULL;
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		treq->pktopts = skb;
	}
	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (isn == 0)
		isn = tcp_v6_init_sequence(sk, skb);

	tcp_rsk(req)->snt_isn = isn;

	if (tcp_v6_send_synack(sk, req, NULL))
		goto drop;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop:
	if (req)
		reqsk_free(req);

	TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
	return 0; /* don't send reset */
}

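/*
 * Create the child socket once the 3WHS completes.  The v4-mapped case
 * clones via tcp_v4_syn_recv_sock() and then grafts the mapped IPv6
 * addresses and af_ops onto the result; the native case routes, clones
 * with tcp_create_openreq_child() and duplicates any IPv6 options.
 */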
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (np->rxopt.bits.osrcrt == 2 &&
	    opt == NULL && treq->pktopts) {
		struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
		if (rxopt->srcrt)
			opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
	}

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

	__inet6_hash(&tcp_hashinfo, newsk);
	inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

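/*
 * Validate the TCP checksum using the IPv6 pseudo-header.  Short
 * packets (<= 76 bytes) are checksummed immediately; for longer ones
 * we stash the partial pseudo-header sum in skb->csum and let the
 * later copy-to-user checksumming finish the job.
 */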
static int tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_HW) {
		if (!tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb, 0))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose a better solution.
	                                       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* You may ask, what is this?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}

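/*
 * Main receive routine.  Validates header length and checksum, fills
 * in the TCP control block, looks the socket up, and then either
 * processes the segment directly, prequeues it, or backlogs it when
 * the socket is owned by user context.
 */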
static int tcp_v6_rcv(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 * Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v6_checksum_init(skb)))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
			    &skb->nh.ipv6h->daddr, ntohs(th->dest),
			    inet6_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(skb);
	}

discard_it:

	/*
	 * Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put((struct inet_timewait_sock *)sk);
		goto discard_it;
	}

	switch (tcp_timewait_state_process((struct inet_timewait_sock *)sk,
					   skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(&tcp_hashinfo,
					    &skb->nh.ipv6h->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}

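/*
 * Two af_ops tables: ipv6_specific for native IPv6 traffic, and
 * ipv6_mapped (below) for v4-mapped sockets, which transmit with the
 * IPv4 routines while keeping IPv6 socket-option semantics.
 */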
static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 *	TCP over IPv4 via INET6 API
 */

static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}

static int tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}

/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,    /* non standard timer */
		   0,    /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   icsk->icsk_rto,
		   icsk->icsk_ack.ato,
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest  = &tw6->tw_v6_daddr;
	src   = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

#ifdef CONFIG_PROC_FS
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct file_operations tcp6_seq_fops;
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_show	= tcp6_seq_show,
	.seq_fops	= &tcp6_seq_fops,
};

int __init tcp6_proc_init(void)
{
	return tcp_proc_register(&tcp6_seq_afinfo);
}

void tcp6_proc_exit(void)
{
	tcp_proc_unregister(&tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v6_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

static struct inet6_protocol tcpv6_protocol = {
	.handler	= tcp_v6_rcv,
	.err_handler	= tcp_v6_err,
	.gso_send_check	= tcp_v6_gso_send_check,
	.gso_segment	= tcp_tso_segment,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_TCP,
	.prot		= &tcpv6_prot,
	.ops		= &inet6_stream_ops,
	.capability	= -1,
	.no_check	= 0,
	.flags		= INET_PROTOSW_PERMANENT |
			  INET_PROTOSW_ICSK,
};

void __init tcpv6_init(void)
{
	/* register inet6 protocol */
	if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
		printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
	inet6_register_protosw(&tcpv6_protosw);

	if (inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, SOCK_RAW,
				     IPPROTO_TCP) < 0)
		panic("Failed to create the TCPv6 control socket.\n");
}