/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
27
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/jiffies.h>
35 #include <linux/in.h>
36 #include <linux/in6.h>
37 #include <linux/netdevice.h>
38 #include <linux/init.h>
39 #include <linux/jhash.h>
40 #include <linux/ipsec.h>
41 #include <linux/times.h>
42
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/addrconf.h>
60 #include <net/snmp.h>
61 #include <net/dsfield.h>
62 #include <net/timewait_sock.h>
63
64 #include <asm/uaccess.h>
65
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
68
69 #include <linux/crypto.h>
70 #include <linux/scatterlist.h>
71

/* Socket used for sending RSTs and ACKs */
static struct socket *tcp6_socket;

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void tcp_v6_send_check(struct sock *sk, int len,
			      struct sk_buff *skb);

static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#endif

static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(&tcp_hashinfo, sk);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
					    skb->nh.ipv6h->saddr.s6_addr32,
					    skb->h.th->dest,
					    skb->h.th->source);
}

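/* Active open for an AF_INET6 socket: resolve any flow label supplied by
 * the caller, divert v4-mapped destinations to tcp_v4_connect(), perform
 * the IPv6 route and xfrm lookups, pick a source address, and hand off to
 * tcp_connect() with a secure initial sequence number.
 */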
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 * TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto failure;

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

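/* ICMPv6 error handler. For ICMPV6_PKT_TOOBIG this drives path MTU
 * discovery by re-validating the cached route and shrinking the MSS;
 * other errors are translated with icmpv6_err_convert() and reported to
 * the socket (or used to drop a pending request_sock on a listener).
 */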
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;

	sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
			  th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;
	security_req_classify_flow(req, &fl);

	if (dst == NULL) {
		opt = np->opt;
		if (opt == NULL &&
		    np->rxopt.bits.osrcrt == 2 &&
		    treq->pktopts) {
			struct sk_buff *pktopts = treq->pktopts;
			struct inet6_skb_parm *rxopt = IP6CB(pktopts);
			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(pktopts->nh.raw + rxopt->srcrt));
		}

		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;
		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);
		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto done;
	}

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts)
		kfree_skb(inet6_rsk(req)->pktopts);
}

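/* TCP MD5 signature support (RFC 2385, CONFIG_TCP_MD5SIG): per-socket
 * lookup, add and delete of per-peer keys, plus hash computation and
 * verification for inbound segments.
 */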
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
			return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp6_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = (struct tcp6_md5sig_key *) tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
		}
		tcp_alloc_md5sig_pool();
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof(tp->md5sig_info->keys6[0]) *
					(tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof(tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;

				tcp_free_md5sig_pool();

				return 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof(tp->md5sig_info->keys6[0]));
			}
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED)
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

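/* Compute the MD5 signature over (1) an IPv6 pseudo-header, (2) the TCP
 * header with its checksum zeroed, (3) any segment payload and (4) the
 * shared key, feeding the pieces to the crypto layer as a scatterlist.
 * On any failure the 16-byte digest is zeroed instead.
 */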
static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   struct tcphdr *th, int protocol,
				   int tcplen)
{
	struct scatterlist sg[4];
	__u16 data_len;
	int block = 0;
	__sum16 cksum;
	struct tcp_md5sig_pool *hp;
	struct tcp6_pseudohdr *bp;
	struct hash_desc *desc;
	int err;
	unsigned int nbytes = 0;

	hp = tcp_get_md5sig_pool();
	if (!hp) {
		printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__);
		goto clear_hash_noput;
	}
	bp = &hp->md5_blk.ip6;
	desc = &hp->md5_desc;

	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->len = htonl(tcplen);
	bp->protocol = htonl(protocol);

	sg_set_buf(&sg[block++], bp, sizeof(*bp));
	nbytes += sizeof(*bp);

	/* 2. TCP header, excluding options */
	cksum = th->check;
	th->check = 0;
	sg_set_buf(&sg[block++], th, sizeof(*th));
	nbytes += sizeof(*th);

	/* 3. TCP segment data (if any) */
	data_len = tcplen - (th->doff << 2);
	if (data_len > 0) {
		u8 *data = (u8 *)th + (th->doff << 2);
		sg_set_buf(&sg[block++], data, data_len);
		nbytes += data_len;
	}

	/* 4. shared key */
	sg_set_buf(&sg[block++], key->key, key->keylen);
	nbytes += key->keylen;

	/* Now store the hash into the packet */
	err = crypto_hash_init(desc);
	if (err) {
		printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__);
		goto clear_hash;
	}
	err = crypto_hash_update(desc, sg, nbytes);
	if (err) {
		printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__);
		goto clear_hash;
	}
	err = crypto_hash_final(desc, md5_hash);
	if (err) {
		printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__);
		goto clear_hash;
	}

	/* Reset header, and free up the crypto */
	tcp_put_md5sig_pool();
	th->check = cksum;
out:
	return 0;
clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	goto out;
}

static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
				struct sock *sk,
				struct dst_entry *dst,
				struct request_sock *req,
				struct tcphdr *th, int protocol,
				int tcplen)
{
	struct in6_addr *saddr, *daddr;

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	}
	return tcp_v6_do_calc_md5_hash(md5_hash, key,
				       saddr, daddr,
				       th, protocol, tcplen);
}

static int tcp_v6_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = skb->nh.ipv6h;
	struct tcphdr *th = skb->h.th;
	int length = (th->doff << 2) - sizeof(*th);
	int genhash;
	u8 *ptr;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);

	/* If the TCP option is too short, we can short cut */
	if (length < TCPOLEN_MD5SIG)
		return hash_expected ? 1 : 0;

	/* parse options */
	ptr = (u8 *)(th + 1);
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			goto done_opts;
		case TCPOPT_NOP:
			length--;
			continue;
		default:
			opsize = *ptr++;
			if (opsize < 2 || opsize > length)
				goto done_opts;
			if (opcode == TCPOPT_MD5SIG) {
				hash_location = ptr;
				goto done_opts;
			}
		}
		ptr += opsize - 2;
		length -= opsize;
	}

done_opts:
	/* do we have a hash as expected? */
	if (!hash_expected) {
		if (!hash_location)
			return 0;
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash NOT expected but found "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}

	if (!hash_location) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash expected but NOT found "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_do_calc_md5_hash(newhash,
					  hash_expected,
					  &ip6h->saddr, &ip6h->daddr,
					  th, sk->sk_protocol,
					  skb->len);
	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       genhash ? "failed" : "mismatch",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
};
#endif

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = skb->h.th;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff << 2,
							 skb->csum));
	}
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = skb->nh.ipv6h;
	th = skb->h.th;

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

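/* Build and transmit an RST in response to an unacceptable segment. The
 * reply swaps the addresses and ports of the offending packet; its
 * sequence number is taken from the incoming ACK field when present,
 * otherwise the RST ACKs everything the segment covered. The reply is
 * sent via the global tcp6_socket, since no local socket may exist.
 */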
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(*th);
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &skb->nh.ipv6h->daddr);
	else
		key = NULL;

	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff << 2));
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		__be32 *opt = (__be32 *)(t1 + 1);
		opt[0] = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) |
			       TCPOLEN_MD5SIG);
		tcp_v6_do_calc_md5_hash((__u8 *)&opt[1],
					key,
					&skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr,
					t1, IPPROTO_TCP,
					tot_len);
	}
#endif

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {

		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(struct tcphdr);
	__be32 *topt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	struct tcp_md5sig_key tw_key;
#endif

#ifdef CONFIG_TCP_MD5SIG
	if (!tw && skb->sk) {
		key = tcp_v6_md5_do_lookup(skb->sk, &skb->nh.ipv6h->daddr);
	} else if (tw && tw->tw_md5_keylen) {
		tw_key.key = tw->tw_md5_key;
		tw_key.keylen = tw->tw_md5_keylen;
		key = &tw_key;
	} else {
		key = NULL;
	}
#endif

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_do_calc_md5_hash((__u8 *)topt,
					key,
					&skb->nh.ipv6h->daddr,
					&skb->nh.ipv6h->saddr,
					t1, IPPROTO_TCP,
					tot_len);
	}
#endif

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
}

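/* For a segment arriving on a listening socket, look for a matching
 * half-open request or an already-established (or TIME-WAIT) connection
 * that the demux missed; return the socket the segment should be
 * processed on, or NULL to drop it.
 */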
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = skb->h.th;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &skb->nh.ipv6h->saddr,
				   &skb->nh.ipv6h->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
					 th->source, &skb->nh.ipv6h->daddr,
					 ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#if 0 /*def CONFIG_SYN_COOKIES*/
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}

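/* Handle an incoming SYN: allocate a request_sock, record the addresses,
 * negotiated options and any received IPv6 packet options, choose an
 * initial sequence number, send the SYN-ACK and queue the request on the
 * listener until the handshake completes.
 */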
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
	TCP_ECN_create_request(req, skb->h.th);
	treq->pktopts = NULL;
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		treq->pktopts = skb;
	}
	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (isn == 0)
		isn = tcp_v6_init_sequence(skb);

	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req, NULL))
		goto drop;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop:
	if (req)
		reqsk_free(req);

	return 0; /* don't send reset */
}

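/* Create the child socket once the three-way handshake completes. The
 * ETH_P_IP branch handles a v4-mapped connection accepted on an AF_INET6
 * listener by building a v4 child and patching in the mapped-address ops;
 * the native path clones the listener, installs addresses, options and
 * route, and inherits any MD5 key for the peer.
 */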
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (np->rxopt.bits.osrcrt == 2 &&
	    opt == NULL && treq->pktopts) {
		struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
		if (rxopt->srcrt)
			opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
	}

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever, but we
	   do one more thing here: reattach optmem to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(&tcp_hashinfo, newsk);
	inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

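/* Validate the TCP checksum of an incoming segment: trust a checksum
 * already verified by hardware (CHECKSUM_COMPLETE), fully verify short
 * segments immediately, and defer the rest until the data is copied.
 */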
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->h.th, skb->len,
					      &skb->nh.ipv6h->saddr,
					      &skb->nh.ipv6h->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < (skb->h.th->doff << 2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}

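/* Main receive entry point, called from the IPv6 protocol handler: pull
 * and sanity-check the TCP header, verify the checksum, fill in the TCP
 * control block, look up the owning socket and either process the segment
 * directly, prequeue it, or push it onto the socket backlog. TIME-WAIT
 * and unmatched segments are handled at the labels below.
 */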
static int tcp_v6_rcv(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct tcphdr *th;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 * Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = skb->h.th;

	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
	     tcp_v6_checksum_init(skb)))
		goto bad_packet;

	th = skb->h.th;
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup(&tcp_hashinfo, &skb->nh.ipv6h->saddr, th->source,
			    &skb->nh.ipv6h->daddr, ntohs(th->dest),
			    inet6_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 * Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(&tcp_hashinfo,
					    &skb->nh.ipv6h->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}

static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	=	inet6_csk_xmit,
	.send_check	=	tcp_v6_send_check,
	.rebuild_header	=	inet6_sk_rebuild_header,
	.conn_request	=	tcp_v6_conn_request,
	.syn_recv_sock	=	tcp_v6_syn_recv_sock,
	.remember_stamp	=	tcp_v6_remember_stamp,
	.net_header_len	=	sizeof(struct ipv6hdr),
	.setsockopt	=	ipv6_setsockopt,
	.getsockopt	=	ipv6_getsockopt,
	.addr2sockaddr	=	inet6_csk_addr2sockaddr,
	.sockaddr_len	=	sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt =	compat_ipv6_setsockopt,
	.compat_getsockopt =	compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_calc_md5_hash,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	=	ip_queue_xmit,
	.send_check	=	tcp_v4_send_check,
	.rebuild_header	=	inet_sk_rebuild_header,
	.conn_request	=	tcp_v6_conn_request,
	.syn_recv_sock	=	tcp_v6_syn_recv_sock,
	.remember_stamp	=	tcp_v4_remember_stamp,
	.net_header_len	=	sizeof(struct iphdr),
	.setsockopt	=	ipv6_setsockopt,
	.getsockopt	=	ipv6_getsockopt,
	.addr2sockaddr	=	inet6_csk_addr2sockaddr,
	.sockaddr_len	=	sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt =	compat_ipv6_setsockopt,
	.compat_getsockopt =	compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_calc_md5_hash,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}

static int tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}

/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,    /* non standard timer */
		   0,    /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest = &np->daddr;
	src = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active = 2;
		timer_expires = sp->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   icsk->icsk_rto,
		   icsk->icsk_ack.ato,
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

#ifdef CONFIG_PROC_FS
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct file_operations tcp6_seq_fops;
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_show	= tcp6_seq_show,
	.seq_fops	= &tcp6_seq_fops,
};

int __init tcp6_proc_init(void)
{
	return tcp_proc_register(&tcp6_seq_afinfo);
}

void tcp6_proc_exit(void)
{
	tcp_proc_unregister(&tcp6_seq_afinfo);
}
#endif

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v6_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

static struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.capability	=	-1,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

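/* Module initialization: register the TCP handler with the IPv6 protocol
 * switch and create the control socket used to emit RSTs and ACKs.
 */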
void __init tcpv6_init(void)
{
	/* register inet6 protocol */
	if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
		printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
	inet6_register_protosw(&tcpv6_protosw);

	if (inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, SOCK_RAW,
				     IPPROTO_TCP) < 0)
		panic("Failed to create the TCPv6 control socket.\n");
}