[XFRM]: Allow packet drops during larval state resolution.
[deliverable/linux.git] / net / ipv6 / tcp_ipv6.c
1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9 *
10 * Based on:
11 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c
13 * linux/net/ipv4/tcp_output.c
14 *
15 * Fixes:
16 * Hideaki YOSHIFUJI : sin6_scope_id support
17 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
18 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
19 * a single port at the same time.
20 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/jiffies.h>
35 #include <linux/in.h>
36 #include <linux/in6.h>
37 #include <linux/netdevice.h>
38 #include <linux/init.h>
39 #include <linux/jhash.h>
40 #include <linux/ipsec.h>
41 #include <linux/times.h>
42
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/addrconf.h>
60 #include <net/snmp.h>
61 #include <net/dsfield.h>
62 #include <net/timewait_sock.h>
63
64 #include <asm/uaccess.h>
65
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
68
69 #include <linux/crypto.h>
70 #include <linux/scatterlist.h>
71
72 /* Socket used for sending RSTs and ACKs */
73 static struct socket *tcp6_socket;
74
75 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
76 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
77 static void tcp_v6_send_check(struct sock *sk, int len,
78 struct sk_buff *skb);
79
80 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
81
82 static struct inet_connection_sock_af_ops ipv6_mapped;
83 static struct inet_connection_sock_af_ops ipv6_specific;
84 #ifdef CONFIG_TCP_MD5SIG
85 static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
86 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
87 #endif
88
89 static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
90 {
91 return inet_csk_get_port(&tcp_hashinfo, sk, snum,
92 inet6_csk_bind_conflict);
93 }
94
95 static void tcp_v6_hash(struct sock *sk)
96 {
97 if (sk->sk_state != TCP_CLOSE) {
98 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
99 tcp_prot.hash(sk);
100 return;
101 }
102 local_bh_disable();
103 __inet6_hash(&tcp_hashinfo, sk);
104 local_bh_enable();
105 }
106 }
107
108 static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
109 struct in6_addr *saddr,
110 struct in6_addr *daddr,
111 __wsum base)
112 {
113 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
114 }
115
116 static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
117 {
118 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
119 ipv6_hdr(skb)->saddr.s6_addr32,
120 tcp_hdr(skb)->dest,
121 tcp_hdr(skb)->source);
122 }
123
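/*
 * Active open. Validates the destination address (any -> loopback,
 * multicast rejected, link-local requires a bound interface), hands
 * IPv4-mapped destinations off to tcp_v4_connect(), performs the IPv6
 * route and xfrm lookup, selects a source address and starts the SYN
 * handshake.
 */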
124 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
125 int addr_len)
126 {
127 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
128 struct inet_sock *inet = inet_sk(sk);
129 struct inet_connection_sock *icsk = inet_csk(sk);
130 struct ipv6_pinfo *np = inet6_sk(sk);
131 struct tcp_sock *tp = tcp_sk(sk);
132 struct in6_addr *saddr = NULL, *final_p = NULL, final;
133 struct flowi fl;
134 struct dst_entry *dst;
135 int addr_type;
136 int err;
137
138 if (addr_len < SIN6_LEN_RFC2133)
139 return -EINVAL;
140
141 if (usin->sin6_family != AF_INET6)
142 return(-EAFNOSUPPORT);
143
144 memset(&fl, 0, sizeof(fl));
145
146 if (np->sndflow) {
147 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
148 IP6_ECN_flow_init(fl.fl6_flowlabel);
149 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
150 struct ip6_flowlabel *flowlabel;
151 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
152 if (flowlabel == NULL)
153 return -EINVAL;
154 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
155 fl6_sock_release(flowlabel);
156 }
157 }
158
159 /*
160 * connect() to INADDR_ANY means loopback (BSD'ism).
161 */
162
163 if(ipv6_addr_any(&usin->sin6_addr))
164 usin->sin6_addr.s6_addr[15] = 0x1;
165
166 addr_type = ipv6_addr_type(&usin->sin6_addr);
167
168 if(addr_type & IPV6_ADDR_MULTICAST)
169 return -ENETUNREACH;
170
171 if (addr_type&IPV6_ADDR_LINKLOCAL) {
172 if (addr_len >= sizeof(struct sockaddr_in6) &&
173 usin->sin6_scope_id) {
174 /* If interface is set while binding, indices
175 * must coincide.
176 */
177 if (sk->sk_bound_dev_if &&
178 sk->sk_bound_dev_if != usin->sin6_scope_id)
179 return -EINVAL;
180
181 sk->sk_bound_dev_if = usin->sin6_scope_id;
182 }
183
184 /* Connect to link-local address requires an interface */
185 if (!sk->sk_bound_dev_if)
186 return -EINVAL;
187 }
188
189 if (tp->rx_opt.ts_recent_stamp &&
190 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
191 tp->rx_opt.ts_recent = 0;
192 tp->rx_opt.ts_recent_stamp = 0;
193 tp->write_seq = 0;
194 }
195
196 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
197 np->flow_label = fl.fl6_flowlabel;
198
199 /*
200 * TCP over IPv4
201 */
202
203 if (addr_type == IPV6_ADDR_MAPPED) {
204 u32 exthdrlen = icsk->icsk_ext_hdr_len;
205 struct sockaddr_in sin;
206
207 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
208
209 if (__ipv6_only_sock(sk))
210 return -ENETUNREACH;
211
212 sin.sin_family = AF_INET;
213 sin.sin_port = usin->sin6_port;
214 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
215
216 icsk->icsk_af_ops = &ipv6_mapped;
217 sk->sk_backlog_rcv = tcp_v4_do_rcv;
218 #ifdef CONFIG_TCP_MD5SIG
219 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
220 #endif
221
222 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
223
224 if (err) {
225 icsk->icsk_ext_hdr_len = exthdrlen;
226 icsk->icsk_af_ops = &ipv6_specific;
227 sk->sk_backlog_rcv = tcp_v6_do_rcv;
228 #ifdef CONFIG_TCP_MD5SIG
229 tp->af_specific = &tcp_sock_ipv6_specific;
230 #endif
231 goto failure;
232 } else {
233 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
234 inet->saddr);
235 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
236 inet->rcv_saddr);
237 }
238
239 return err;
240 }
241
242 if (!ipv6_addr_any(&np->rcv_saddr))
243 saddr = &np->rcv_saddr;
244
245 fl.proto = IPPROTO_TCP;
246 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
247 ipv6_addr_copy(&fl.fl6_src,
248 (saddr ? saddr : &np->saddr));
249 fl.oif = sk->sk_bound_dev_if;
250 fl.fl_ip_dport = usin->sin6_port;
251 fl.fl_ip_sport = inet->sport;
252
253 if (np->opt && np->opt->srcrt) {
254 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
255 ipv6_addr_copy(&final, &fl.fl6_dst);
256 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
257 final_p = &final;
258 }
259
260 security_sk_classify_flow(sk, &fl);
261
262 err = ip6_dst_lookup(sk, &dst, &fl);
263 if (err)
264 goto failure;
265 if (final_p)
266 ipv6_addr_copy(&fl.fl6_dst, final_p);
267
268 if ((err = __xfrm_lookup(&dst, &fl, sk, 1)) < 0) {
269 if (err == -EREMOTE)
270 err = ip6_dst_blackhole(sk, &dst, &fl);
271 if (err < 0)
272 goto failure;
273 }
274
275 if (saddr == NULL) {
276 saddr = &fl.fl6_src;
277 ipv6_addr_copy(&np->rcv_saddr, saddr);
278 }
279
280 /* set the source address */
281 ipv6_addr_copy(&np->saddr, saddr);
282 inet->rcv_saddr = LOOPBACK4_IPV6;
283
284 sk->sk_gso_type = SKB_GSO_TCPV6;
285 __ip6_dst_store(sk, dst, NULL, NULL);
286
287 icsk->icsk_ext_hdr_len = 0;
288 if (np->opt)
289 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
290 np->opt->opt_nflen);
291
292 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
293
294 inet->dport = usin->sin6_port;
295
296 tcp_set_state(sk, TCP_SYN_SENT);
297 err = inet6_hash_connect(&tcp_death_row, sk);
298 if (err)
299 goto late_failure;
300
301 if (!tp->write_seq)
302 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
303 np->daddr.s6_addr32,
304 inet->sport,
305 inet->dport);
306
307 err = tcp_connect(sk);
308 if (err)
309 goto late_failure;
310
311 return 0;
312
313 late_failure:
314 tcp_set_state(sk, TCP_CLOSE);
315 __sk_dst_reset(sk);
316 failure:
317 inet->dport = 0;
318 sk->sk_route_caps = 0;
319 return err;
320 }
321
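/*
 * ICMPv6 error handler. Locates the socket the error refers to, handles
 * path-MTU discovery (ICMPV6_PKT_TOOBIG) by re-routing and shrinking the
 * MSS, and reports other errors to the affected socket (dropping the
 * matching request_sock on a listener).
 */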
322 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
323 int type, int code, int offset, __be32 info)
324 {
325 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
326 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
327 struct ipv6_pinfo *np;
328 struct sock *sk;
329 int err;
330 struct tcp_sock *tp;
331 __u32 seq;
332
333 sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
334 th->source, skb->dev->ifindex);
335
336 if (sk == NULL) {
337 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
338 return;
339 }
340
341 if (sk->sk_state == TCP_TIME_WAIT) {
342 inet_twsk_put(inet_twsk(sk));
343 return;
344 }
345
346 bh_lock_sock(sk);
347 if (sock_owned_by_user(sk))
348 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
349
350 if (sk->sk_state == TCP_CLOSE)
351 goto out;
352
353 tp = tcp_sk(sk);
354 seq = ntohl(th->seq);
355 if (sk->sk_state != TCP_LISTEN &&
356 !between(seq, tp->snd_una, tp->snd_nxt)) {
357 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
358 goto out;
359 }
360
361 np = inet6_sk(sk);
362
363 if (type == ICMPV6_PKT_TOOBIG) {
364 struct dst_entry *dst = NULL;
365
366 if (sock_owned_by_user(sk))
367 goto out;
368 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
369 goto out;
370
371 /* icmp should have updated the destination cache entry */
372 dst = __sk_dst_check(sk, np->dst_cookie);
373
374 if (dst == NULL) {
375 struct inet_sock *inet = inet_sk(sk);
376 struct flowi fl;
377
378 /* BUGGG_FUTURE: Again, it is not clear how
379 to handle rthdr case. Ignore this complexity
380 for now.
381 */
382 memset(&fl, 0, sizeof(fl));
383 fl.proto = IPPROTO_TCP;
384 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
385 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
386 fl.oif = sk->sk_bound_dev_if;
387 fl.fl_ip_dport = inet->dport;
388 fl.fl_ip_sport = inet->sport;
389 security_skb_classify_flow(skb, &fl);
390
391 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
392 sk->sk_err_soft = -err;
393 goto out;
394 }
395
396 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
397 sk->sk_err_soft = -err;
398 goto out;
399 }
400
401 } else
402 dst_hold(dst);
403
404 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
405 tcp_sync_mss(sk, dst_mtu(dst));
406 tcp_simple_retransmit(sk);
407 } /* else let the usual retransmit timer handle it */
408 dst_release(dst);
409 goto out;
410 }
411
412 icmpv6_err_convert(type, code, &err);
413
414 /* Might be for a request_sock */
415 switch (sk->sk_state) {
416 struct request_sock *req, **prev;
417 case TCP_LISTEN:
418 if (sock_owned_by_user(sk))
419 goto out;
420
421 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
422 &hdr->saddr, inet6_iif(skb));
423 if (!req)
424 goto out;
425
426 /* ICMPs are not backlogged, hence we cannot get
427 * an established socket here.
428 */
429 BUG_TRAP(req->sk == NULL);
430
431 if (seq != tcp_rsk(req)->snt_isn) {
432 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
433 goto out;
434 }
435
436 inet_csk_reqsk_queue_drop(sk, req, prev);
437 goto out;
438
439 case TCP_SYN_SENT:
440 case TCP_SYN_RECV: /* Cannot happen.
441 It can, it SYNs are crossed. --ANK */
442 if (!sock_owned_by_user(sk)) {
443 sk->sk_err = err;
444 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
445
446 tcp_done(sk);
447 } else
448 sk->sk_err_soft = err;
449 goto out;
450 }
451
452 if (!sock_owned_by_user(sk) && np->recverr) {
453 sk->sk_err = err;
454 sk->sk_error_report(sk);
455 } else
456 sk->sk_err_soft = err;
457
458 out:
459 bh_unlock_sock(sk);
460 sock_put(sk);
461 }
462
463
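/*
 * Build and transmit a SYN|ACK for a pending connection request, doing a
 * route/xfrm lookup first (including any routing-header rewrite) when the
 * caller did not supply a dst entry.
 */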
464 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
465 struct dst_entry *dst)
466 {
467 struct inet6_request_sock *treq = inet6_rsk(req);
468 struct ipv6_pinfo *np = inet6_sk(sk);
469 struct sk_buff * skb;
470 struct ipv6_txoptions *opt = NULL;
471 struct in6_addr * final_p = NULL, final;
472 struct flowi fl;
473 int err = -1;
474
475 memset(&fl, 0, sizeof(fl));
476 fl.proto = IPPROTO_TCP;
477 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
478 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
479 fl.fl6_flowlabel = 0;
480 fl.oif = treq->iif;
481 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
482 fl.fl_ip_sport = inet_sk(sk)->sport;
483 security_req_classify_flow(req, &fl);
484
485 if (dst == NULL) {
486 opt = np->opt;
487 if (opt == NULL &&
488 np->rxopt.bits.osrcrt == 2 &&
489 treq->pktopts) {
490 struct sk_buff *pktopts = treq->pktopts;
491 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
492 if (rxopt->srcrt)
493 opt = ipv6_invert_rthdr(sk,
494 (struct ipv6_rt_hdr *)(skb_network_header(pktopts) +
495 rxopt->srcrt));
496 }
497
498 if (opt && opt->srcrt) {
499 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
500 ipv6_addr_copy(&final, &fl.fl6_dst);
501 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
502 final_p = &final;
503 }
504
505 err = ip6_dst_lookup(sk, &dst, &fl);
506 if (err)
507 goto done;
508 if (final_p)
509 ipv6_addr_copy(&fl.fl6_dst, final_p);
510 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
511 goto done;
512 }
513
514 skb = tcp_make_synack(sk, dst, req);
515 if (skb) {
516 struct tcphdr *th = tcp_hdr(skb);
517
518 th->check = tcp_v6_check(th, skb->len,
519 &treq->loc_addr, &treq->rmt_addr,
520 csum_partial((char *)th, skb->len, skb->csum));
521
522 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
523 err = ip6_xmit(sk, skb, &fl, opt, 0);
524 err = net_xmit_eval(err);
525 }
526
527 done:
528 if (opt && opt != np->opt)
529 sock_kfree_s(sk, opt, opt->tot_len);
530 dst_release(dst);
531 return err;
532 }
533
534 static void tcp_v6_reqsk_destructor(struct request_sock *req)
535 {
536 if (inet6_rsk(req)->pktopts)
537 kfree_skb(inet6_rsk(req)->pktopts);
538 }
539
540 #ifdef CONFIG_TCP_MD5SIG
541 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
542 struct in6_addr *addr)
543 {
544 struct tcp_sock *tp = tcp_sk(sk);
545 int i;
546
547 BUG_ON(tp == NULL);
548
549 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
550 return NULL;
551
552 for (i = 0; i < tp->md5sig_info->entries6; i++) {
553 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
554 return (struct tcp_md5sig_key *)&tp->md5sig_info->keys6[i];
555 }
556 return NULL;
557 }
558
559 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
560 struct sock *addr_sk)
561 {
562 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
563 }
564
565 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
566 struct request_sock *req)
567 {
568 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
569 }
570
571 static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
572 char *newkey, u8 newkeylen)
573 {
574 /* Add key to the list */
575 struct tcp6_md5sig_key *key;
576 struct tcp_sock *tp = tcp_sk(sk);
577 struct tcp6_md5sig_key *keys;
578
579 key = (struct tcp6_md5sig_key*) tcp_v6_md5_do_lookup(sk, peer);
580 if (key) {
581 /* modify existing entry - just update that one */
582 kfree(key->key);
583 key->key = newkey;
584 key->keylen = newkeylen;
585 } else {
586 /* reallocate new list if current one is full. */
587 if (!tp->md5sig_info) {
588 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
589 if (!tp->md5sig_info) {
590 kfree(newkey);
591 return -ENOMEM;
592 }
593 }
594 tcp_alloc_md5sig_pool();
595 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
596 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
597 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
598
599 if (!keys) {
600 tcp_free_md5sig_pool();
601 kfree(newkey);
602 return -ENOMEM;
603 }
604
605 if (tp->md5sig_info->entries6)
606 memmove(keys, tp->md5sig_info->keys6,
607 (sizeof (tp->md5sig_info->keys6[0]) *
608 tp->md5sig_info->entries6));
609
610 kfree(tp->md5sig_info->keys6);
611 tp->md5sig_info->keys6 = keys;
612 tp->md5sig_info->alloced6++;
613 }
614
615 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
616 peer);
617 tp->md5sig_info->keys6[tp->md5sig_info->entries6].key = newkey;
618 tp->md5sig_info->keys6[tp->md5sig_info->entries6].keylen = newkeylen;
619
620 tp->md5sig_info->entries6++;
621 }
622 return 0;
623 }
624
625 static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
626 u8 *newkey, __u8 newkeylen)
627 {
628 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
629 newkey, newkeylen);
630 }
631
632 static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
633 {
634 struct tcp_sock *tp = tcp_sk(sk);
635 int i;
636
637 for (i = 0; i < tp->md5sig_info->entries6; i++) {
638 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
639 /* Free the key */
640 kfree(tp->md5sig_info->keys6[i].key);
641 tp->md5sig_info->entries6--;
642
643 if (tp->md5sig_info->entries6 == 0) {
644 kfree(tp->md5sig_info->keys6);
645 tp->md5sig_info->keys6 = NULL;
646
647 tcp_free_md5sig_pool();
648
649 return 0;
650 } else {
651 /* shrink the database */
652 if (tp->md5sig_info->entries6 != i)
653 memmove(&tp->md5sig_info->keys6[i],
654 &tp->md5sig_info->keys6[i+1],
655 (tp->md5sig_info->entries6 - i)
656 * sizeof (tp->md5sig_info->keys6[0]));
657 }
658 }
659 }
660 return -ENOENT;
661 }
662
663 static void tcp_v6_clear_md5_list (struct sock *sk)
664 {
665 struct tcp_sock *tp = tcp_sk(sk);
666 int i;
667
668 if (tp->md5sig_info->entries6) {
669 for (i = 0; i < tp->md5sig_info->entries6; i++)
670 kfree(tp->md5sig_info->keys6[i].key);
671 tp->md5sig_info->entries6 = 0;
672 tcp_free_md5sig_pool();
673 }
674
675 kfree(tp->md5sig_info->keys6);
676 tp->md5sig_info->keys6 = NULL;
677 tp->md5sig_info->alloced6 = 0;
678
679 if (tp->md5sig_info->entries4) {
680 for (i = 0; i < tp->md5sig_info->entries4; i++)
681 kfree(tp->md5sig_info->keys4[i].key);
682 tp->md5sig_info->entries4 = 0;
683 tcp_free_md5sig_pool();
684 }
685
686 kfree(tp->md5sig_info->keys4);
687 tp->md5sig_info->keys4 = NULL;
688 tp->md5sig_info->alloced4 = 0;
689 }
690
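/*
 * TCP_MD5SIG setsockopt() handler: copies the key from user space and
 * adds, replaces or deletes the per-peer MD5 key; v4-mapped peers are
 * handled via the IPv4 key list.
 */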
691 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
692 int optlen)
693 {
694 struct tcp_md5sig cmd;
695 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
696 u8 *newkey;
697
698 if (optlen < sizeof(cmd))
699 return -EINVAL;
700
701 if (copy_from_user(&cmd, optval, sizeof(cmd)))
702 return -EFAULT;
703
704 if (sin6->sin6_family != AF_INET6)
705 return -EINVAL;
706
707 if (!cmd.tcpm_keylen) {
708 if (!tcp_sk(sk)->md5sig_info)
709 return -ENOENT;
710 if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED)
711 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
712 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
713 }
714
715 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
716 return -EINVAL;
717
718 if (!tcp_sk(sk)->md5sig_info) {
719 struct tcp_sock *tp = tcp_sk(sk);
720 struct tcp_md5sig_info *p;
721
722 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
723 if (!p)
724 return -ENOMEM;
725
726 tp->md5sig_info = p;
727 }
728
729 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
730 if (!newkey)
731 return -ENOMEM;
732 if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED) {
733 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
734 newkey, cmd.tcpm_keylen);
735 }
736 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
737 }
738
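/*
 * Compute the RFC 2385 TCP-MD5 digest over the IPv6 pseudo-header, the
 * TCP header (with its checksum temporarily zeroed), any payload, and
 * the shared key.
 */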
739 static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
740 struct in6_addr *saddr,
741 struct in6_addr *daddr,
742 struct tcphdr *th, int protocol,
743 int tcplen)
744 {
745 struct scatterlist sg[4];
746 __u16 data_len;
747 int block = 0;
748 __sum16 cksum;
749 struct tcp_md5sig_pool *hp;
750 struct tcp6_pseudohdr *bp;
751 struct hash_desc *desc;
752 int err;
753 unsigned int nbytes = 0;
754
755 hp = tcp_get_md5sig_pool();
756 if (!hp) {
757 printk(KERN_WARNING "%s(): hash pool not found...\n", __FUNCTION__);
758 goto clear_hash_noput;
759 }
760 bp = &hp->md5_blk.ip6;
761 desc = &hp->md5_desc;
762
763 /* 1. TCP pseudo-header (RFC2460) */
764 ipv6_addr_copy(&bp->saddr, saddr);
765 ipv6_addr_copy(&bp->daddr, daddr);
766 bp->len = htonl(tcplen);
767 bp->protocol = htonl(protocol);
768
769 sg_set_buf(&sg[block++], bp, sizeof(*bp));
770 nbytes += sizeof(*bp);
771
772 /* 2. TCP header, excluding options */
773 cksum = th->check;
774 th->check = 0;
775 sg_set_buf(&sg[block++], th, sizeof(*th));
776 nbytes += sizeof(*th);
777
778 /* 3. TCP segment data (if any) */
779 data_len = tcplen - (th->doff << 2);
780 if (data_len > 0) {
781 u8 *data = (u8 *)th + (th->doff << 2);
782 sg_set_buf(&sg[block++], data, data_len);
783 nbytes += data_len;
784 }
785
786 /* 4. shared key */
787 sg_set_buf(&sg[block++], key->key, key->keylen);
788 nbytes += key->keylen;
789
790 /* Now store the hash into the packet */
791 err = crypto_hash_init(desc);
792 if (err) {
793 printk(KERN_WARNING "%s(): hash_init failed\n", __FUNCTION__);
794 goto clear_hash;
795 }
796 err = crypto_hash_update(desc, sg, nbytes);
797 if (err) {
798 printk(KERN_WARNING "%s(): hash_update failed\n", __FUNCTION__);
799 goto clear_hash;
800 }
801 err = crypto_hash_final(desc, md5_hash);
802 if (err) {
803 printk(KERN_WARNING "%s(): hash_final failed\n", __FUNCTION__);
804 goto clear_hash;
805 }
806
807 /* Reset header, and free up the crypto */
808 tcp_put_md5sig_pool();
809 th->check = cksum;
810 out:
811 return 0;
812 clear_hash:
813 tcp_put_md5sig_pool();
814 clear_hash_noput:
815 memset(md5_hash, 0, 16);
816 goto out;
817 }
818
819 static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
820 struct sock *sk,
821 struct dst_entry *dst,
822 struct request_sock *req,
823 struct tcphdr *th, int protocol,
824 int tcplen)
825 {
826 struct in6_addr *saddr, *daddr;
827
828 if (sk) {
829 saddr = &inet6_sk(sk)->saddr;
830 daddr = &inet6_sk(sk)->daddr;
831 } else {
832 saddr = &inet6_rsk(req)->loc_addr;
833 daddr = &inet6_rsk(req)->rmt_addr;
834 }
835 return tcp_v6_do_calc_md5_hash(md5_hash, key,
836 saddr, daddr,
837 th, protocol, tcplen);
838 }
839
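/*
 * Verify the MD5 signature option on an incoming segment; returns
 * non-zero (drop) when a signature is missing, unexpected, or does not
 * match the configured key for the peer.
 */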
840 static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
841 {
842 __u8 *hash_location = NULL;
843 struct tcp_md5sig_key *hash_expected;
844 struct ipv6hdr *ip6h = ipv6_hdr(skb);
845 struct tcphdr *th = tcp_hdr(skb);
846 int length = (th->doff << 2) - sizeof (*th);
847 int genhash;
848 u8 *ptr;
849 u8 newhash[16];
850
851 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
852
853 /* If the TCP option is too short, we can short cut */
854 if (length < TCPOLEN_MD5SIG)
855 return hash_expected ? 1 : 0;
856
857 /* parse options */
858 ptr = (u8*)(th + 1);
859 while (length > 0) {
860 int opcode = *ptr++;
861 int opsize;
862
863 switch(opcode) {
864 case TCPOPT_EOL:
865 goto done_opts;
866 case TCPOPT_NOP:
867 length--;
868 continue;
869 default:
870 opsize = *ptr++;
871 if (opsize < 2 || opsize > length)
872 goto done_opts;
873 if (opcode == TCPOPT_MD5SIG) {
874 hash_location = ptr;
875 goto done_opts;
876 }
877 }
878 ptr += opsize - 2;
879 length -= opsize;
880 }
881
882 done_opts:
883 /* do we have a hash as expected? */
884 if (!hash_expected) {
885 if (!hash_location)
886 return 0;
887 if (net_ratelimit()) {
888 printk(KERN_INFO "MD5 Hash NOT expected but found "
889 "(" NIP6_FMT ", %u)->"
890 "(" NIP6_FMT ", %u)\n",
891 NIP6(ip6h->saddr), ntohs(th->source),
892 NIP6(ip6h->daddr), ntohs(th->dest));
893 }
894 return 1;
895 }
896
897 if (!hash_location) {
898 if (net_ratelimit()) {
899 printk(KERN_INFO "MD5 Hash expected but NOT found "
900 "(" NIP6_FMT ", %u)->"
901 "(" NIP6_FMT ", %u)\n",
902 NIP6(ip6h->saddr), ntohs(th->source),
903 NIP6(ip6h->daddr), ntohs(th->dest));
904 }
905 return 1;
906 }
907
908 /* check the signature */
909 genhash = tcp_v6_do_calc_md5_hash(newhash,
910 hash_expected,
911 &ip6h->saddr, &ip6h->daddr,
912 th, sk->sk_protocol,
913 skb->len);
914 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
915 if (net_ratelimit()) {
916 printk(KERN_INFO "MD5 Hash %s for "
917 "(" NIP6_FMT ", %u)->"
918 "(" NIP6_FMT ", %u)\n",
919 genhash ? "failed" : "mismatch",
920 NIP6(ip6h->saddr), ntohs(th->source),
921 NIP6(ip6h->daddr), ntohs(th->dest));
922 }
923 return 1;
924 }
925 return 0;
926 }
927 #endif
928
929 static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
930 .family = AF_INET6,
931 .obj_size = sizeof(struct tcp6_request_sock),
932 .rtx_syn_ack = tcp_v6_send_synack,
933 .send_ack = tcp_v6_reqsk_send_ack,
934 .destructor = tcp_v6_reqsk_destructor,
935 .send_reset = tcp_v6_send_reset
936 };
937
938 #ifdef CONFIG_TCP_MD5SIG
939 static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
940 .md5_lookup = tcp_v6_reqsk_md5_lookup,
941 };
942 #endif
943
944 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
945 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
946 .twsk_unique = tcp_twsk_unique,
947 .twsk_destructor= tcp_twsk_destructor,
948 };
949
950 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
951 {
952 struct ipv6_pinfo *np = inet6_sk(sk);
953 struct tcphdr *th = tcp_hdr(skb);
954
955 if (skb->ip_summed == CHECKSUM_PARTIAL) {
956 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
957 skb->csum_start = skb_transport_header(skb) - skb->head;
958 skb->csum_offset = offsetof(struct tcphdr, check);
959 } else {
960 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
961 csum_partial((char *)th, th->doff<<2,
962 skb->csum));
963 }
964 }
965
966 static int tcp_v6_gso_send_check(struct sk_buff *skb)
967 {
968 struct ipv6hdr *ipv6h;
969 struct tcphdr *th;
970
971 if (!pskb_may_pull(skb, sizeof(*th)))
972 return -EINVAL;
973
974 ipv6h = ipv6_hdr(skb);
975 th = tcp_hdr(skb);
976
977 th->check = 0;
978 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
979 IPPROTO_TCP, 0);
980 skb->csum_start = skb_transport_header(skb) - skb->head;
981 skb->csum_offset = offsetof(struct tcphdr, check);
982 skb->ip_summed = CHECKSUM_PARTIAL;
983 return 0;
984 }
985
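/*
 * Send a RST in reply to skb (never in reply to another RST). The reply
 * is built on a freshly allocated skb and transmitted through the tcp6
 * control socket after a route/xfrm lookup.
 */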
986 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
987 {
988 struct tcphdr *th = tcp_hdr(skb), *t1;
989 struct sk_buff *buff;
990 struct flowi fl;
991 int tot_len = sizeof(*th);
992 #ifdef CONFIG_TCP_MD5SIG
993 struct tcp_md5sig_key *key;
994 #endif
995
996 if (th->rst)
997 return;
998
999 if (!ipv6_unicast_destination(skb))
1000 return;
1001
1002 #ifdef CONFIG_TCP_MD5SIG
1003 if (sk)
1004 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
1005 else
1006 key = NULL;
1007
1008 if (key)
1009 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1010 #endif
1011
1012 /*
1013 * We need to grab some memory, and put together an RST,
1014 * and then put it into the queue to be sent.
1015 */
1016
1017 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1018 GFP_ATOMIC);
1019 if (buff == NULL)
1020 return;
1021
1022 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1023
1024 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1025
1026 /* Swap the send and the receive. */
1027 memset(t1, 0, sizeof(*t1));
1028 t1->dest = th->source;
1029 t1->source = th->dest;
1030 t1->doff = tot_len / 4;
1031 t1->rst = 1;
1032
1033 if(th->ack) {
1034 t1->seq = th->ack_seq;
1035 } else {
1036 t1->ack = 1;
1037 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
1038 + skb->len - (th->doff<<2));
1039 }
1040
1041 #ifdef CONFIG_TCP_MD5SIG
1042 if (key) {
1043 __be32 *opt = (__be32*)(t1 + 1);
1044 opt[0] = htonl((TCPOPT_NOP << 24) |
1045 (TCPOPT_NOP << 16) |
1046 (TCPOPT_MD5SIG << 8) |
1047 TCPOLEN_MD5SIG);
1048 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
1049 &ipv6_hdr(skb)->daddr,
1050 &ipv6_hdr(skb)->saddr,
1051 t1, IPPROTO_TCP, tot_len);
1052 }
1053 #endif
1054
1055 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
1056
1057 memset(&fl, 0, sizeof(fl));
1058 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1059 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1060
1061 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1062 sizeof(*t1), IPPROTO_TCP,
1063 buff->csum);
1064
1065 fl.proto = IPPROTO_TCP;
1066 fl.oif = inet6_iif(skb);
1067 fl.fl_ip_dport = t1->dest;
1068 fl.fl_ip_sport = t1->source;
1069 security_skb_classify_flow(skb, &fl);
1070
1071 /* sk = NULL, but it is safe for now. RST socket required. */
1072 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
1073
1074 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1075 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
1076 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1077 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1078 return;
1079 }
1080 }
1081
1082 kfree_skb(buff);
1083 }
1084
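/*
 * Send a bare ACK, used for TIME_WAIT and request_sock replies,
 * optionally carrying timestamp and MD5 signature options.
 */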
1085 static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1086 struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
1087 {
1088 struct tcphdr *th = tcp_hdr(skb), *t1;
1089 struct sk_buff *buff;
1090 struct flowi fl;
1091 int tot_len = sizeof(struct tcphdr);
1092 __be32 *topt;
1093 #ifdef CONFIG_TCP_MD5SIG
1094 struct tcp_md5sig_key *key;
1095 struct tcp_md5sig_key tw_key;
1096 #endif
1097
1098 #ifdef CONFIG_TCP_MD5SIG
1099 if (!tw && skb->sk) {
1100 key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
1101 } else if (tw && tw->tw_md5_keylen) {
1102 tw_key.key = tw->tw_md5_key;
1103 tw_key.keylen = tw->tw_md5_keylen;
1104 key = &tw_key;
1105 } else {
1106 key = NULL;
1107 }
1108 #endif
1109
1110 if (ts)
1111 tot_len += TCPOLEN_TSTAMP_ALIGNED;
1112 #ifdef CONFIG_TCP_MD5SIG
1113 if (key)
1114 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1115 #endif
1116
1117 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1118 GFP_ATOMIC);
1119 if (buff == NULL)
1120 return;
1121
1122 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1123
1124 t1 = (struct tcphdr *) skb_push(buff,tot_len);
1125
1126 /* Swap the send and the receive. */
1127 memset(t1, 0, sizeof(*t1));
1128 t1->dest = th->source;
1129 t1->source = th->dest;
1130 t1->doff = tot_len/4;
1131 t1->seq = htonl(seq);
1132 t1->ack_seq = htonl(ack);
1133 t1->ack = 1;
1134 t1->window = htons(win);
1135
1136 topt = (__be32 *)(t1 + 1);
1137
1138 if (ts) {
1139 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1140 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1141 *topt++ = htonl(tcp_time_stamp);
1142 *topt = htonl(ts);
1143 }
1144
1145 #ifdef CONFIG_TCP_MD5SIG
1146 if (key) {
1147 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1148 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1149 tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
1150 &ipv6_hdr(skb)->daddr,
1151 &ipv6_hdr(skb)->saddr,
1152 t1, IPPROTO_TCP, tot_len);
1153 }
1154 #endif
1155
1156 buff->csum = csum_partial((char *)t1, tot_len, 0);
1157
1158 memset(&fl, 0, sizeof(fl));
1159 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1160 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1161
1162 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1163 tot_len, IPPROTO_TCP,
1164 buff->csum);
1165
1166 fl.proto = IPPROTO_TCP;
1167 fl.oif = inet6_iif(skb);
1168 fl.fl_ip_dport = t1->dest;
1169 fl.fl_ip_sport = t1->source;
1170 security_skb_classify_flow(skb, &fl);
1171
1172 if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
1173 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1174 ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
1175 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1176 return;
1177 }
1178 }
1179
1180 kfree_skb(buff);
1181 }
1182
1183 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1184 {
1185 struct inet_timewait_sock *tw = inet_twsk(sk);
1186 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1187
1188 tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1189 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1190 tcptw->tw_ts_recent);
1191
1192 inet_twsk_put(tw);
1193 }
1194
1195 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1196 {
1197 tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
1198 }
1199
1200
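/*
 * On a listening socket, match an incoming segment against pending
 * connection requests or an already established child socket.
 */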
1201 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1202 {
1203 struct request_sock *req, **prev;
1204 const struct tcphdr *th = tcp_hdr(skb);
1205 struct sock *nsk;
1206
1207 /* Find possible connection requests. */
1208 req = inet6_csk_search_req(sk, &prev, th->source,
1209 &ipv6_hdr(skb)->saddr,
1210 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1211 if (req)
1212 return tcp_check_req(sk, skb, req, prev);
1213
1214 nsk = __inet6_lookup_established(&tcp_hashinfo, &ipv6_hdr(skb)->saddr,
1215 th->source, &ipv6_hdr(skb)->daddr,
1216 ntohs(th->dest), inet6_iif(skb));
1217
1218 if (nsk) {
1219 if (nsk->sk_state != TCP_TIME_WAIT) {
1220 bh_lock_sock(nsk);
1221 return nsk;
1222 }
1223 inet_twsk_put(inet_twsk(nsk));
1224 return NULL;
1225 }
1226
1227 #if 0 /*def CONFIG_SYN_COOKIES*/
1228 if (!th->rst && !th->syn && th->ack)
1229 sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
1230 #endif
1231 return sk;
1232 }
1233
1234 /* FIXME: this is substantially similar to the ipv4 code.
1235 * Can some kind of merge be done? -- erics
1236 */
1237 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1238 {
1239 struct inet6_request_sock *treq;
1240 struct ipv6_pinfo *np = inet6_sk(sk);
1241 struct tcp_options_received tmp_opt;
1242 struct tcp_sock *tp = tcp_sk(sk);
1243 struct request_sock *req = NULL;
1244 __u32 isn = TCP_SKB_CB(skb)->when;
1245
1246 if (skb->protocol == htons(ETH_P_IP))
1247 return tcp_v4_conn_request(sk, skb);
1248
1249 if (!ipv6_unicast_destination(skb))
1250 goto drop;
1251
1252 /*
1253 * There are no SYN attacks on IPv6, yet...
1254 */
1255 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1256 if (net_ratelimit())
1257 printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
1258 goto drop;
1259 }
1260
1261 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1262 goto drop;
1263
1264 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1265 if (req == NULL)
1266 goto drop;
1267
1268 #ifdef CONFIG_TCP_MD5SIG
1269 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1270 #endif
1271
1272 tcp_clear_options(&tmp_opt);
1273 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1274 tmp_opt.user_mss = tp->rx_opt.user_mss;
1275
1276 tcp_parse_options(skb, &tmp_opt, 0);
1277
1278 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1279 tcp_openreq_init(req, &tmp_opt, skb);
1280
1281 treq = inet6_rsk(req);
1282 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1283 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1284 TCP_ECN_create_request(req, tcp_hdr(skb));
1285 treq->pktopts = NULL;
1286 if (ipv6_opt_accepted(sk, skb) ||
1287 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1288 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1289 atomic_inc(&skb->users);
1290 treq->pktopts = skb;
1291 }
1292 treq->iif = sk->sk_bound_dev_if;
1293
1294 /* So that link locals have meaning */
1295 if (!sk->sk_bound_dev_if &&
1296 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1297 treq->iif = inet6_iif(skb);
1298
1299 if (isn == 0)
1300 isn = tcp_v6_init_sequence(skb);
1301
1302 tcp_rsk(req)->snt_isn = isn;
1303
1304 security_inet_conn_request(sk, skb, req);
1305
1306 if (tcp_v6_send_synack(sk, req, NULL))
1307 goto drop;
1308
1309 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1310 return 0;
1311
1312 drop:
1313 if (req)
1314 reqsk_free(req);
1315
1316 return 0; /* don't send reset */
1317 }
1318
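/*
 * Create the child socket once the three-way handshake completes; the
 * v6-mapped (IPv4 packet) case is delegated to tcp_v4_syn_recv_sock().
 */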
1319 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1320 struct request_sock *req,
1321 struct dst_entry *dst)
1322 {
1323 struct inet6_request_sock *treq = inet6_rsk(req);
1324 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1325 struct tcp6_sock *newtcp6sk;
1326 struct inet_sock *newinet;
1327 struct tcp_sock *newtp;
1328 struct sock *newsk;
1329 struct ipv6_txoptions *opt;
1330 #ifdef CONFIG_TCP_MD5SIG
1331 struct tcp_md5sig_key *key;
1332 #endif
1333
1334 if (skb->protocol == htons(ETH_P_IP)) {
1335 /*
1336 * v6 mapped
1337 */
1338
1339 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1340
1341 if (newsk == NULL)
1342 return NULL;
1343
1344 newtcp6sk = (struct tcp6_sock *)newsk;
1345 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1346
1347 newinet = inet_sk(newsk);
1348 newnp = inet6_sk(newsk);
1349 newtp = tcp_sk(newsk);
1350
1351 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1352
1353 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1354 newinet->daddr);
1355
1356 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1357 newinet->saddr);
1358
1359 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1360
1361 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1362 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1363 #ifdef CONFIG_TCP_MD5SIG
1364 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1365 #endif
1366
1367 newnp->pktoptions = NULL;
1368 newnp->opt = NULL;
1369 newnp->mcast_oif = inet6_iif(skb);
1370 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1371
1372 /*
1373 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1374 * here, tcp_create_openreq_child now does this for us, see the comment in
1375 * that function for the gory details. -acme
1376 */
1377
1378 /* It is tricky place. Until this moment IPv4 tcp
1379 worked with IPv6 icsk.icsk_af_ops.
1380 Sync it now.
1381 */
1382 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1383
1384 return newsk;
1385 }
1386
1387 opt = np->opt;
1388
1389 if (sk_acceptq_is_full(sk))
1390 goto out_overflow;
1391
1392 if (np->rxopt.bits.osrcrt == 2 &&
1393 opt == NULL && treq->pktopts) {
1394 struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
1395 if (rxopt->srcrt)
1396 opt = ipv6_invert_rthdr(sk,
1397 (struct ipv6_rt_hdr *)(skb_network_header(treq->pktopts) +
1398 rxopt->srcrt));
1399 }
1400
1401 if (dst == NULL) {
1402 struct in6_addr *final_p = NULL, final;
1403 struct flowi fl;
1404
1405 memset(&fl, 0, sizeof(fl));
1406 fl.proto = IPPROTO_TCP;
1407 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1408 if (opt && opt->srcrt) {
1409 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1410 ipv6_addr_copy(&final, &fl.fl6_dst);
1411 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1412 final_p = &final;
1413 }
1414 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1415 fl.oif = sk->sk_bound_dev_if;
1416 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1417 fl.fl_ip_sport = inet_sk(sk)->sport;
1418 security_req_classify_flow(req, &fl);
1419
1420 if (ip6_dst_lookup(sk, &dst, &fl))
1421 goto out;
1422
1423 if (final_p)
1424 ipv6_addr_copy(&fl.fl6_dst, final_p);
1425
1426 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1427 goto out;
1428 }
1429
1430 newsk = tcp_create_openreq_child(sk, req, skb);
1431 if (newsk == NULL)
1432 goto out;
1433
1434 /*
1435 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1436 * count here, tcp_create_openreq_child now does this for us, see the
1437 * comment in that function for the gory details. -acme
1438 */
1439
1440 newsk->sk_gso_type = SKB_GSO_TCPV6;
1441 __ip6_dst_store(newsk, dst, NULL, NULL);
1442
1443 newtcp6sk = (struct tcp6_sock *)newsk;
1444 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1445
1446 newtp = tcp_sk(newsk);
1447 newinet = inet_sk(newsk);
1448 newnp = inet6_sk(newsk);
1449
1450 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1451
1452 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1453 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1454 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1455 newsk->sk_bound_dev_if = treq->iif;
1456
1457 /* Now IPv6 options...
1458
1459 First: no IPv4 options.
1460 */
1461 newinet->opt = NULL;
1462 newnp->ipv6_fl_list = NULL;
1463
1464 /* Clone RX bits */
1465 newnp->rxopt.all = np->rxopt.all;
1466
1467 /* Clone pktoptions received with SYN */
1468 newnp->pktoptions = NULL;
1469 if (treq->pktopts != NULL) {
1470 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1471 kfree_skb(treq->pktopts);
1472 treq->pktopts = NULL;
1473 if (newnp->pktoptions)
1474 skb_set_owner_r(newnp->pktoptions, newsk);
1475 }
1476 newnp->opt = NULL;
1477 newnp->mcast_oif = inet6_iif(skb);
1478 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1479
1480 /* Clone native IPv6 options from listening socket (if any)
1481
1482 Yes, keeping reference count would be much more clever,
1483 but we do one more thing here: reattach optmem
1484 to newsk.
1485 */
1486 if (opt) {
1487 newnp->opt = ipv6_dup_options(newsk, opt);
1488 if (opt != np->opt)
1489 sock_kfree_s(sk, opt, opt->tot_len);
1490 }
1491
1492 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1493 if (newnp->opt)
1494 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1495 newnp->opt->opt_flen);
1496
1497 tcp_mtup_init(newsk);
1498 tcp_sync_mss(newsk, dst_mtu(dst));
1499 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1500 tcp_initialize_rcv_mss(newsk);
1501
1502 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1503
1504 #ifdef CONFIG_TCP_MD5SIG
1505 /* Copy over the MD5 key from the original socket */
1506 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1507 /* We're using one, so create a matching key
1508 * on the newsk structure. If we fail to get
1509 * memory, then we end up not copying the key
1510 * across. Shucks.
1511 */
1512 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1513 if (newkey != NULL)
1514 tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
1515 newkey, key->keylen);
1516 }
1517 #endif
1518
1519 __inet6_hash(&tcp_hashinfo, newsk);
1520 inet_inherit_port(&tcp_hashinfo, sk, newsk);
1521
1522 return newsk;
1523
1524 out_overflow:
1525 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1526 out:
1527 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1528 if (opt && opt != np->opt)
1529 sock_kfree_s(sk, opt, opt->tot_len);
1530 dst_release(dst);
1531 return NULL;
1532 }
1533
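/*
 * Validate or set up the receive checksum: trust CHECKSUM_COMPLETE when
 * the pseudo-header sum verifies, fully check short packets now, and
 * defer the rest until the data is copied.
 */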
1534 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1535 {
1536 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1537 if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
1538 &ipv6_hdr(skb)->daddr, skb->csum)) {
1539 skb->ip_summed = CHECKSUM_UNNECESSARY;
1540 return 0;
1541 }
1542 }
1543
1544 skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
1545 &ipv6_hdr(skb)->saddr,
1546 &ipv6_hdr(skb)->daddr, 0));
1547
1548 if (skb->len <= 76) {
1549 return __skb_checksum_complete(skb);
1550 }
1551 return 0;
1552 }
1553
1554 /* The socket must have its spinlock held when we get
1555 * here.
1556 *
1557 * We have a potential double-lock case here, so even when
1558 * doing backlog processing we use the BH locking scheme.
1559 * This is because we cannot sleep with the original spinlock
1560 * held.
1561 */
1562 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1563 {
1564 struct ipv6_pinfo *np = inet6_sk(sk);
1565 struct tcp_sock *tp;
1566 struct sk_buff *opt_skb = NULL;
1567
1568 /* Imagine: socket is IPv6. IPv4 packet arrives,
1569 goes to IPv4 receive handler and backlogged.
1570 From backlog it always goes here. Kerboom...
1571 Fortunately, tcp_rcv_established and rcv_established
1572 handle them correctly, but that is not the case with
1573 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1574 */
1575
1576 if (skb->protocol == htons(ETH_P_IP))
1577 return tcp_v4_do_rcv(sk, skb);
1578
1579 #ifdef CONFIG_TCP_MD5SIG
1580 if (tcp_v6_inbound_md5_hash (sk, skb))
1581 goto discard;
1582 #endif
1583
1584 if (sk_filter(sk, skb))
1585 goto discard;
1586
1587 /*
1588 * socket locking is here for SMP purposes as backlog rcv
1589 * is currently called with bh processing disabled.
1590 */
1591
1592 /* Do Stevens' IPV6_PKTOPTIONS.
1593
1594 Yes, guys, it is the only place in our code where we
1595 can do this without affecting IPv4.
1596 The rest of the code is protocol independent,
1597 and I do not like the idea of uglifying IPv4.
1598
1599 Actually, the whole idea behind IPV6_PKTOPTIONS
1600 does not look very well thought out. For now we latch the
1601 options received in the last packet enqueued
1602 by tcp. Feel free to propose a better solution.
1603 --ANK (980728)
1604 */
1605 if (np->rxopt.all)
1606 opt_skb = skb_clone(skb, GFP_ATOMIC);
1607
1608 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1609 TCP_CHECK_TIMER(sk);
1610 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1611 goto reset;
1612 TCP_CHECK_TIMER(sk);
1613 if (opt_skb)
1614 goto ipv6_pktoptions;
1615 return 0;
1616 }
1617
1618 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1619 goto csum_err;
1620
1621 if (sk->sk_state == TCP_LISTEN) {
1622 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1623 if (!nsk)
1624 goto discard;
1625
1626 /*
1627 * Queue it on the new socket if the new socket is active,
1628 * otherwise we just short-circuit this and continue with
1629 * the new socket.
1630 */
1631 if(nsk != sk) {
1632 if (tcp_child_process(sk, nsk, skb))
1633 goto reset;
1634 if (opt_skb)
1635 __kfree_skb(opt_skb);
1636 return 0;
1637 }
1638 }
1639
1640 TCP_CHECK_TIMER(sk);
1641 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1642 goto reset;
1643 TCP_CHECK_TIMER(sk);
1644 if (opt_skb)
1645 goto ipv6_pktoptions;
1646 return 0;
1647
1648 reset:
1649 tcp_v6_send_reset(sk, skb);
1650 discard:
1651 if (opt_skb)
1652 __kfree_skb(opt_skb);
1653 kfree_skb(skb);
1654 return 0;
1655 csum_err:
1656 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1657 goto discard;
1658
1659
1660 ipv6_pktoptions:
1661 /* What is going on here, you may ask?
1662
1663 1. skb was enqueued by tcp.
1664 2. skb is added to tail of read queue, rather than out of order.
1665 3. socket is not in passive state.
1666 4. Finally, it really contains options which the user wants to receive.
1667 */
1668 tp = tcp_sk(sk);
1669 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1670 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1671 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1672 np->mcast_oif = inet6_iif(opt_skb);
1673 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1674 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1675 if (ipv6_opt_accepted(sk, opt_skb)) {
1676 skb_set_owner_r(opt_skb, sk);
1677 opt_skb = xchg(&np->pktoptions, opt_skb);
1678 } else {
1679 __kfree_skb(opt_skb);
1680 opt_skb = xchg(&np->pktoptions, NULL);
1681 }
1682 }
1683
1684 if (opt_skb)
1685 kfree_skb(opt_skb);
1686 return 0;
1687 }
1688
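/*
 * Main receive entry point registered with the IPv6 protocol handler:
 * sanity-check the TCP header, verify the checksum, fill in TCP_SKB_CB,
 * look up the owning socket and either process the segment directly,
 * prequeue it, or add it to the backlog if the socket is busy.
 */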
1689 static int tcp_v6_rcv(struct sk_buff **pskb)
1690 {
1691 struct sk_buff *skb = *pskb;
1692 struct tcphdr *th;
1693 struct sock *sk;
1694 int ret;
1695
1696 if (skb->pkt_type != PACKET_HOST)
1697 goto discard_it;
1698
1699 /*
1700 * Count it even if it's bad.
1701 */
1702 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1703
1704 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1705 goto discard_it;
1706
1707 th = tcp_hdr(skb);
1708
1709 if (th->doff < sizeof(struct tcphdr)/4)
1710 goto bad_packet;
1711 if (!pskb_may_pull(skb, th->doff*4))
1712 goto discard_it;
1713
1714 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1715 goto bad_packet;
1716
1717 th = tcp_hdr(skb);
1718 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1719 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1720 skb->len - th->doff*4);
1721 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1722 TCP_SKB_CB(skb)->when = 0;
1723 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1724 TCP_SKB_CB(skb)->sacked = 0;
1725
1726 sk = __inet6_lookup(&tcp_hashinfo, &ipv6_hdr(skb)->saddr, th->source,
1727 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
1728 inet6_iif(skb));
1729
1730 if (!sk)
1731 goto no_tcp_socket;
1732
1733 process:
1734 if (sk->sk_state == TCP_TIME_WAIT)
1735 goto do_time_wait;
1736
1737 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1738 goto discard_and_relse;
1739
1740 if (sk_filter(sk, skb))
1741 goto discard_and_relse;
1742
1743 skb->dev = NULL;
1744
1745 bh_lock_sock_nested(sk);
1746 ret = 0;
1747 if (!sock_owned_by_user(sk)) {
1748 #ifdef CONFIG_NET_DMA
1749 struct tcp_sock *tp = tcp_sk(sk);
1750 if (tp->ucopy.dma_chan)
1751 ret = tcp_v6_do_rcv(sk, skb);
1752 else
1753 #endif
1754 {
1755 if (!tcp_prequeue(sk, skb))
1756 ret = tcp_v6_do_rcv(sk, skb);
1757 }
1758 } else
1759 sk_add_backlog(sk, skb);
1760 bh_unlock_sock(sk);
1761
1762 sock_put(sk);
1763 return ret ? -1 : 0;
1764
1765 no_tcp_socket:
1766 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1767 goto discard_it;
1768
1769 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1770 bad_packet:
1771 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1772 } else {
1773 tcp_v6_send_reset(NULL, skb);
1774 }
1775
1776 discard_it:
1777
1778 /*
1779 * Discard frame
1780 */
1781
1782 kfree_skb(skb);
1783 return 0;
1784
1785 discard_and_relse:
1786 sock_put(sk);
1787 goto discard_it;
1788
1789 do_time_wait:
1790 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1791 inet_twsk_put(inet_twsk(sk));
1792 goto discard_it;
1793 }
1794
1795 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1796 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1797 inet_twsk_put(inet_twsk(sk));
1798 goto discard_it;
1799 }
1800
1801 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1802 case TCP_TW_SYN:
1803 {
1804 struct sock *sk2;
1805
1806 sk2 = inet6_lookup_listener(&tcp_hashinfo,
1807 &ipv6_hdr(skb)->daddr,
1808 ntohs(th->dest), inet6_iif(skb));
1809 if (sk2 != NULL) {
1810 struct inet_timewait_sock *tw = inet_twsk(sk);
1811 inet_twsk_deschedule(tw, &tcp_death_row);
1812 inet_twsk_put(tw);
1813 sk = sk2;
1814 goto process;
1815 }
1816 /* Fall through to ACK */
1817 }
1818 case TCP_TW_ACK:
1819 tcp_v6_timewait_ack(sk, skb);
1820 break;
1821 case TCP_TW_RST:
1822 goto no_tcp_socket;
1823 case TCP_TW_SUCCESS:;
1824 }
1825 goto discard_it;
1826 }
1827
1828 static int tcp_v6_remember_stamp(struct sock *sk)
1829 {
1830 /* Alas, not yet... */
1831 return 0;
1832 }
1833
1834 static struct inet_connection_sock_af_ops ipv6_specific = {
1835 .queue_xmit = inet6_csk_xmit,
1836 .send_check = tcp_v6_send_check,
1837 .rebuild_header = inet6_sk_rebuild_header,
1838 .conn_request = tcp_v6_conn_request,
1839 .syn_recv_sock = tcp_v6_syn_recv_sock,
1840 .remember_stamp = tcp_v6_remember_stamp,
1841 .net_header_len = sizeof(struct ipv6hdr),
1842 .setsockopt = ipv6_setsockopt,
1843 .getsockopt = ipv6_getsockopt,
1844 .addr2sockaddr = inet6_csk_addr2sockaddr,
1845 .sockaddr_len = sizeof(struct sockaddr_in6),
1846 #ifdef CONFIG_COMPAT
1847 .compat_setsockopt = compat_ipv6_setsockopt,
1848 .compat_getsockopt = compat_ipv6_getsockopt,
1849 #endif
1850 };
1851
1852 #ifdef CONFIG_TCP_MD5SIG
1853 static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1854 .md5_lookup = tcp_v6_md5_lookup,
1855 .calc_md5_hash = tcp_v6_calc_md5_hash,
1856 .md5_add = tcp_v6_md5_add_func,
1857 .md5_parse = tcp_v6_parse_md5_keys,
1858 };
1859 #endif
1860
1861 /*
1862 * TCP over IPv4 via INET6 API
1863 */
1864
1865 static struct inet_connection_sock_af_ops ipv6_mapped = {
1866 .queue_xmit = ip_queue_xmit,
1867 .send_check = tcp_v4_send_check,
1868 .rebuild_header = inet_sk_rebuild_header,
1869 .conn_request = tcp_v6_conn_request,
1870 .syn_recv_sock = tcp_v6_syn_recv_sock,
1871 .remember_stamp = tcp_v4_remember_stamp,
1872 .net_header_len = sizeof(struct iphdr),
1873 .setsockopt = ipv6_setsockopt,
1874 .getsockopt = ipv6_getsockopt,
1875 .addr2sockaddr = inet6_csk_addr2sockaddr,
1876 .sockaddr_len = sizeof(struct sockaddr_in6),
1877 #ifdef CONFIG_COMPAT
1878 .compat_setsockopt = compat_ipv6_setsockopt,
1879 .compat_getsockopt = compat_ipv6_getsockopt,
1880 #endif
1881 };
1882
1883 #ifdef CONFIG_TCP_MD5SIG
1884 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1885 .md5_lookup = tcp_v4_md5_lookup,
1886 .calc_md5_hash = tcp_v4_calc_md5_hash,
1887 .md5_add = tcp_v6_md5_add_func,
1888 .md5_parse = tcp_v6_parse_md5_keys,
1889 };
1890 #endif
1891
1892 /* NOTE: A lot of things are set to zero explicitly by the call to
1893 * sk_alloc(), so they need not be done here.
1894 */
1895 static int tcp_v6_init_sock(struct sock *sk)
1896 {
1897 struct inet_connection_sock *icsk = inet_csk(sk);
1898 struct tcp_sock *tp = tcp_sk(sk);
1899
1900 skb_queue_head_init(&tp->out_of_order_queue);
1901 tcp_init_xmit_timers(sk);
1902 tcp_prequeue_init(tp);
1903
1904 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1905 tp->mdev = TCP_TIMEOUT_INIT;
1906
1907 /* So many TCP implementations out there (incorrectly) count the
1908 * initial SYN frame in their delayed-ACK and congestion control
1909 * algorithms that we must have the following bandaid to talk
1910 * efficiently to them. -DaveM
1911 */
1912 tp->snd_cwnd = 2;
1913
1914 /* See draft-stevens-tcpca-spec-01 for discussion of the
1915 * initialization of these values.
1916 */
1917 tp->snd_ssthresh = 0x7fffffff;
1918 tp->snd_cwnd_clamp = ~0;
1919 tp->mss_cache = 536;
1920
1921 tp->reordering = sysctl_tcp_reordering;
1922
1923 sk->sk_state = TCP_CLOSE;
1924
1925 icsk->icsk_af_ops = &ipv6_specific;
1926 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1927 icsk->icsk_sync_mss = tcp_sync_mss;
1928 sk->sk_write_space = sk_stream_write_space;
1929 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1930
1931 #ifdef CONFIG_TCP_MD5SIG
1932 tp->af_specific = &tcp_sock_ipv6_specific;
1933 #endif
1934
1935 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1936 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1937
1938 atomic_inc(&tcp_sockets_allocated);
1939
1940 return 0;
1941 }
1942
1943 static int tcp_v6_destroy_sock(struct sock *sk)
1944 {
1945 #ifdef CONFIG_TCP_MD5SIG
1946 /* Clean up the MD5 key list */
1947 if (tcp_sk(sk)->md5sig_info)
1948 tcp_v6_clear_md5_list(sk);
1949 #endif
1950 tcp_v4_destroy_sock(sk);
1951 return inet6_destroy_sock(sk);
1952 }
1953
1954 #ifdef CONFIG_PROC_FS
1955 /* Proc filesystem TCPv6 sock list dumping. */
1956 static void get_openreq6(struct seq_file *seq,
1957 struct sock *sk, struct request_sock *req, int i, int uid)
1958 {
1959 int ttd = req->expires - jiffies;
1960 struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1961 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1962
1963 if (ttd < 0)
1964 ttd = 0;
1965
1966 seq_printf(seq,
1967 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1968 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1969 i,
1970 src->s6_addr32[0], src->s6_addr32[1],
1971 src->s6_addr32[2], src->s6_addr32[3],
1972 ntohs(inet_sk(sk)->sport),
1973 dest->s6_addr32[0], dest->s6_addr32[1],
1974 dest->s6_addr32[2], dest->s6_addr32[3],
1975 ntohs(inet_rsk(req)->rmt_port),
1976 TCP_SYN_RECV,
1977 0,0, /* could print option size, but that is af dependent. */
1978 1, /* timers active (only the expire timer) */
1979 jiffies_to_clock_t(ttd),
1980 req->retrans,
1981 uid,
1982 0, /* non standard timer */
1983 0, /* open_requests have no inode */
1984 0, req);
1985 }
1986
1987 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1988 {
1989 struct in6_addr *dest, *src;
1990 __u16 destp, srcp;
1991 int timer_active;
1992 unsigned long timer_expires;
1993 struct inet_sock *inet = inet_sk(sp);
1994 struct tcp_sock *tp = tcp_sk(sp);
1995 const struct inet_connection_sock *icsk = inet_csk(sp);
1996 struct ipv6_pinfo *np = inet6_sk(sp);
1997
1998 dest = &np->daddr;
1999 src = &np->rcv_saddr;
2000 destp = ntohs(inet->dport);
2001 srcp = ntohs(inet->sport);
2002
2003 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2004 timer_active = 1;
2005 timer_expires = icsk->icsk_timeout;
2006 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2007 timer_active = 4;
2008 timer_expires = icsk->icsk_timeout;
2009 } else if (timer_pending(&sp->sk_timer)) {
2010 timer_active = 2;
2011 timer_expires = sp->sk_timer.expires;
2012 } else {
2013 timer_active = 0;
2014 timer_expires = jiffies;
2015 }
2016
2017 seq_printf(seq,
2018 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2019 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
2020 i,
2021 src->s6_addr32[0], src->s6_addr32[1],
2022 src->s6_addr32[2], src->s6_addr32[3], srcp,
2023 dest->s6_addr32[0], dest->s6_addr32[1],
2024 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2025 sp->sk_state,
2026 tp->write_seq-tp->snd_una,
2027 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2028 timer_active,
2029 jiffies_to_clock_t(timer_expires - jiffies),
2030 icsk->icsk_retransmits,
2031 sock_i_uid(sp),
2032 icsk->icsk_probes_out,
2033 sock_i_ino(sp),
2034 atomic_read(&sp->sk_refcnt), sp,
2035 icsk->icsk_rto,
2036 icsk->icsk_ack.ato,
2037 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
2038 tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
2039 );
2040 }
2041
2042 static void get_timewait6_sock(struct seq_file *seq,
2043 struct inet_timewait_sock *tw, int i)
2044 {
2045 struct in6_addr *dest, *src;
2046 __u16 destp, srcp;
2047 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2048 int ttd = tw->tw_ttd - jiffies;
2049
2050 if (ttd < 0)
2051 ttd = 0;
2052
2053 dest = &tw6->tw_v6_daddr;
2054 src = &tw6->tw_v6_rcv_saddr;
2055 destp = ntohs(tw->tw_dport);
2056 srcp = ntohs(tw->tw_sport);
2057
2058 seq_printf(seq,
2059 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2060 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2061 i,
2062 src->s6_addr32[0], src->s6_addr32[1],
2063 src->s6_addr32[2], src->s6_addr32[3], srcp,
2064 dest->s6_addr32[0], dest->s6_addr32[1],
2065 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2066 tw->tw_substate, 0, 0,
2067 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2068 atomic_read(&tw->tw_refcnt), tw);
2069 }
2070
2071 static int tcp6_seq_show(struct seq_file *seq, void *v)
2072 {
2073 struct tcp_iter_state *st;
2074
2075 if (v == SEQ_START_TOKEN) {
2076 seq_puts(seq,
2077 " sl "
2078 "local_address "
2079 "remote_address "
2080 "st tx_queue rx_queue tr tm->when retrnsmt"
2081 " uid timeout inode\n");
2082 goto out;
2083 }
2084 st = seq->private;
2085
2086 switch (st->state) {
2087 case TCP_SEQ_STATE_LISTENING:
2088 case TCP_SEQ_STATE_ESTABLISHED:
2089 get_tcp6_sock(seq, v, st->num);
2090 break;
2091 case TCP_SEQ_STATE_OPENREQ:
2092 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2093 break;
2094 case TCP_SEQ_STATE_TIME_WAIT:
2095 get_timewait6_sock(seq, v, st->num);
2096 break;
2097 }
2098 out:
2099 return 0;
2100 }
2101
2102 static struct file_operations tcp6_seq_fops;
2103 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2104 .owner = THIS_MODULE,
2105 .name = "tcp6",
2106 .family = AF_INET6,
2107 .seq_show = tcp6_seq_show,
2108 .seq_fops = &tcp6_seq_fops,
2109 };
2110
2111 int __init tcp6_proc_init(void)
2112 {
2113 return tcp_proc_register(&tcp6_seq_afinfo);
2114 }
2115
2116 void tcp6_proc_exit(void)
2117 {
2118 tcp_proc_unregister(&tcp6_seq_afinfo);
2119 }
2120 #endif
2121
2122 struct proto tcpv6_prot = {
2123 .name = "TCPv6",
2124 .owner = THIS_MODULE,
2125 .close = tcp_close,
2126 .connect = tcp_v6_connect,
2127 .disconnect = tcp_disconnect,
2128 .accept = inet_csk_accept,
2129 .ioctl = tcp_ioctl,
2130 .init = tcp_v6_init_sock,
2131 .destroy = tcp_v6_destroy_sock,
2132 .shutdown = tcp_shutdown,
2133 .setsockopt = tcp_setsockopt,
2134 .getsockopt = tcp_getsockopt,
2135 .sendmsg = tcp_sendmsg,
2136 .recvmsg = tcp_recvmsg,
2137 .backlog_rcv = tcp_v6_do_rcv,
2138 .hash = tcp_v6_hash,
2139 .unhash = tcp_unhash,
2140 .get_port = tcp_v6_get_port,
2141 .enter_memory_pressure = tcp_enter_memory_pressure,
2142 .sockets_allocated = &tcp_sockets_allocated,
2143 .memory_allocated = &tcp_memory_allocated,
2144 .memory_pressure = &tcp_memory_pressure,
2145 .orphan_count = &tcp_orphan_count,
2146 .sysctl_mem = sysctl_tcp_mem,
2147 .sysctl_wmem = sysctl_tcp_wmem,
2148 .sysctl_rmem = sysctl_tcp_rmem,
2149 .max_header = MAX_TCP_HEADER,
2150 .obj_size = sizeof(struct tcp6_sock),
2151 .twsk_prot = &tcp6_timewait_sock_ops,
2152 .rsk_prot = &tcp6_request_sock_ops,
2153 #ifdef CONFIG_COMPAT
2154 .compat_setsockopt = compat_tcp_setsockopt,
2155 .compat_getsockopt = compat_tcp_getsockopt,
2156 #endif
2157 };
2158
2159 static struct inet6_protocol tcpv6_protocol = {
2160 .handler = tcp_v6_rcv,
2161 .err_handler = tcp_v6_err,
2162 .gso_send_check = tcp_v6_gso_send_check,
2163 .gso_segment = tcp_tso_segment,
2164 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2165 };
2166
2167 static struct inet_protosw tcpv6_protosw = {
2168 .type = SOCK_STREAM,
2169 .protocol = IPPROTO_TCP,
2170 .prot = &tcpv6_prot,
2171 .ops = &inet6_stream_ops,
2172 .capability = -1,
2173 .no_check = 0,
2174 .flags = INET_PROTOSW_PERMANENT |
2175 INET_PROTOSW_ICSK,
2176 };
2177
2178 void __init tcpv6_init(void)
2179 {
2180 /* register inet6 protocol */
2181 if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
2182 printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
2183 inet6_register_protosw(&tcpv6_protosw);
2184
2185 if (inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, SOCK_RAW,
2186 IPPROTO_TCP) < 0)
2187 panic("Failed to create the TCPv6 control socket.\n");
2188 }