[NETNS][IPV6] tcp6 - make socket control per namespace
net/ipv6/tcp_ipv6.c
1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
9 *
10 * Based on:
11 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c
13 * linux/net/ipv4/tcp_output.c
14 *
15 * Fixes:
16 * Hideaki YOSHIFUJI : sin6_scope_id support
17 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
18 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
19 * a single port at the same time.
20 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/jiffies.h>
35 #include <linux/in.h>
36 #include <linux/in6.h>
37 #include <linux/netdevice.h>
38 #include <linux/init.h>
39 #include <linux/jhash.h>
40 #include <linux/ipsec.h>
41 #include <linux/times.h>
42
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
46
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63
64 #include <asm/uaccess.h>
65
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
68
69 #include <linux/crypto.h>
70 #include <linux/scatterlist.h>
71
72 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
73 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
74 static void tcp_v6_send_check(struct sock *sk, int len,
75 struct sk_buff *skb);
76
77 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78
79 static struct inet_connection_sock_af_ops ipv6_mapped;
80 static struct inet_connection_sock_af_ops ipv6_specific;
81 #ifdef CONFIG_TCP_MD5SIG
82 static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
83 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
84 #endif
85
86 static void tcp_v6_hash(struct sock *sk)
87 {
88 if (sk->sk_state != TCP_CLOSE) {
89 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
90 tcp_prot.hash(sk);
91 return;
92 }
93 local_bh_disable();
94 __inet6_hash(sk);
95 local_bh_enable();
96 }
97 }
98
99 static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
100 struct in6_addr *saddr,
101 struct in6_addr *daddr,
102 __wsum base)
103 {
104 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
105 }
106
107 static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
108 {
109 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
110 ipv6_hdr(skb)->saddr.s6_addr32,
111 tcp_hdr(skb)->dest,
112 tcp_hdr(skb)->source);
113 }
114
115 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
116 int addr_len)
117 {
118 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
119 struct inet_sock *inet = inet_sk(sk);
120 struct inet_connection_sock *icsk = inet_csk(sk);
121 struct ipv6_pinfo *np = inet6_sk(sk);
122 struct tcp_sock *tp = tcp_sk(sk);
123 struct in6_addr *saddr = NULL, *final_p = NULL, final;
124 struct flowi fl;
125 struct dst_entry *dst;
126 int addr_type;
127 int err;
128
129 if (addr_len < SIN6_LEN_RFC2133)
130 return -EINVAL;
131
132 if (usin->sin6_family != AF_INET6)
133 return(-EAFNOSUPPORT);
134
135 memset(&fl, 0, sizeof(fl));
136
137 if (np->sndflow) {
138 fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
139 IP6_ECN_flow_init(fl.fl6_flowlabel);
140 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
141 struct ip6_flowlabel *flowlabel;
142 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
143 if (flowlabel == NULL)
144 return -EINVAL;
145 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
146 fl6_sock_release(flowlabel);
147 }
148 }
149
150 /*
151 * connect() to INADDR_ANY means loopback (BSD'ism).
152 */
153
154 if (ipv6_addr_any(&usin->sin6_addr))
155 usin->sin6_addr.s6_addr[15] = 0x1;
156
157 addr_type = ipv6_addr_type(&usin->sin6_addr);
158
159 if (addr_type & IPV6_ADDR_MULTICAST)
160 return -ENETUNREACH;
161
162 if (addr_type&IPV6_ADDR_LINKLOCAL) {
163 if (addr_len >= sizeof(struct sockaddr_in6) &&
164 usin->sin6_scope_id) {
165 /* If interface is set while binding, indices
166 * must coincide.
167 */
168 if (sk->sk_bound_dev_if &&
169 sk->sk_bound_dev_if != usin->sin6_scope_id)
170 return -EINVAL;
171
172 sk->sk_bound_dev_if = usin->sin6_scope_id;
173 }
174
175 /* Connect to link-local address requires an interface */
176 if (!sk->sk_bound_dev_if)
177 return -EINVAL;
178 }
179
180 if (tp->rx_opt.ts_recent_stamp &&
181 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
182 tp->rx_opt.ts_recent = 0;
183 tp->rx_opt.ts_recent_stamp = 0;
184 tp->write_seq = 0;
185 }
186
187 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
188 np->flow_label = fl.fl6_flowlabel;
189
190 /*
191 * TCP over IPv4
192 */
193
194 if (addr_type == IPV6_ADDR_MAPPED) {
195 u32 exthdrlen = icsk->icsk_ext_hdr_len;
196 struct sockaddr_in sin;
197
198 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
199
200 if (__ipv6_only_sock(sk))
201 return -ENETUNREACH;
202
203 sin.sin_family = AF_INET;
204 sin.sin_port = usin->sin6_port;
205 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
206
207 icsk->icsk_af_ops = &ipv6_mapped;
208 sk->sk_backlog_rcv = tcp_v4_do_rcv;
209 #ifdef CONFIG_TCP_MD5SIG
210 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
211 #endif
212
213 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
214
215 if (err) {
216 icsk->icsk_ext_hdr_len = exthdrlen;
217 icsk->icsk_af_ops = &ipv6_specific;
218 sk->sk_backlog_rcv = tcp_v6_do_rcv;
219 #ifdef CONFIG_TCP_MD5SIG
220 tp->af_specific = &tcp_sock_ipv6_specific;
221 #endif
222 goto failure;
223 } else {
224 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
225 inet->saddr);
226 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
227 inet->rcv_saddr);
228 }
229
230 return err;
231 }
232
233 if (!ipv6_addr_any(&np->rcv_saddr))
234 saddr = &np->rcv_saddr;
235
236 fl.proto = IPPROTO_TCP;
237 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
238 ipv6_addr_copy(&fl.fl6_src,
239 (saddr ? saddr : &np->saddr));
240 fl.oif = sk->sk_bound_dev_if;
241 fl.fl_ip_dport = usin->sin6_port;
242 fl.fl_ip_sport = inet->sport;
243
244 if (np->opt && np->opt->srcrt) {
245 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
246 ipv6_addr_copy(&final, &fl.fl6_dst);
247 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
248 final_p = &final;
249 }
250
251 security_sk_classify_flow(sk, &fl);
252
253 err = ip6_dst_lookup(sk, &dst, &fl);
254 if (err)
255 goto failure;
256 if (final_p)
257 ipv6_addr_copy(&fl.fl6_dst, final_p);
258
259 if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) {
260 if (err == -EREMOTE)
261 err = ip6_dst_blackhole(sk, &dst, &fl);
262 if (err < 0)
263 goto failure;
264 }
265
266 if (saddr == NULL) {
267 saddr = &fl.fl6_src;
268 ipv6_addr_copy(&np->rcv_saddr, saddr);
269 }
270
271 /* set the source address */
272 ipv6_addr_copy(&np->saddr, saddr);
273 inet->rcv_saddr = LOOPBACK4_IPV6;
274
275 sk->sk_gso_type = SKB_GSO_TCPV6;
276 __ip6_dst_store(sk, dst, NULL, NULL);
277
278 icsk->icsk_ext_hdr_len = 0;
279 if (np->opt)
280 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
281 np->opt->opt_nflen);
282
283 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
284
285 inet->dport = usin->sin6_port;
286
287 tcp_set_state(sk, TCP_SYN_SENT);
288 err = inet6_hash_connect(&tcp_death_row, sk);
289 if (err)
290 goto late_failure;
291
292 if (!tp->write_seq)
293 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
294 np->daddr.s6_addr32,
295 inet->sport,
296 inet->dport);
297
298 err = tcp_connect(sk);
299 if (err)
300 goto late_failure;
301
302 return 0;
303
304 late_failure:
305 tcp_set_state(sk, TCP_CLOSE);
306 __sk_dst_reset(sk);
307 failure:
308 inet->dport = 0;
309 sk->sk_route_caps = 0;
310 return err;
311 }
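
/* Illustration, not part of this file: a minimal userspace sketch of the
 * v4-mapped branch handled in tcp_v6_connect() above. Connecting an
 * AF_INET6 socket to ::ffff:a.b.c.d flips the socket over to the
 * ipv6_mapped ops and hands the work to tcp_v4_connect(). The address
 * and port below are arbitrary example values.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *
 *	int connect_mapped(void)
 *	{
 *		struct sockaddr_in6 sa;
 *		int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sin6_family = AF_INET6;
 *		sa.sin6_port = htons(80);
 *		inet_pton(AF_INET6, "::ffff:192.0.2.1", &sa.sin6_addr);
 *		return connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *	}
 */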
312
313 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
314 int type, int code, int offset, __be32 info)
315 {
316 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
317 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
318 struct ipv6_pinfo *np;
319 struct sock *sk;
320 int err;
321 struct tcp_sock *tp;
322 __u32 seq;
323
324 sk = inet6_lookup(skb->dev->nd_net, &tcp_hashinfo, &hdr->daddr,
325 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
326
327 if (sk == NULL) {
328 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
329 return;
330 }
331
332 if (sk->sk_state == TCP_TIME_WAIT) {
333 inet_twsk_put(inet_twsk(sk));
334 return;
335 }
336
337 bh_lock_sock(sk);
338 if (sock_owned_by_user(sk))
339 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
340
341 if (sk->sk_state == TCP_CLOSE)
342 goto out;
343
344 tp = tcp_sk(sk);
345 seq = ntohl(th->seq);
346 if (sk->sk_state != TCP_LISTEN &&
347 !between(seq, tp->snd_una, tp->snd_nxt)) {
348 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
349 goto out;
350 }
351
352 np = inet6_sk(sk);
353
354 if (type == ICMPV6_PKT_TOOBIG) {
355 struct dst_entry *dst = NULL;
356
357 if (sock_owned_by_user(sk))
358 goto out;
359 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
360 goto out;
361
362 /* icmp should have updated the destination cache entry */
363 dst = __sk_dst_check(sk, np->dst_cookie);
364
365 if (dst == NULL) {
366 struct inet_sock *inet = inet_sk(sk);
367 struct flowi fl;
368
369 /* BUGGG_FUTURE: Again, it is not clear how
370 to handle rthdr case. Ignore this complexity
371 for now.
372 */
373 memset(&fl, 0, sizeof(fl));
374 fl.proto = IPPROTO_TCP;
375 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
376 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
377 fl.oif = sk->sk_bound_dev_if;
378 fl.fl_ip_dport = inet->dport;
379 fl.fl_ip_sport = inet->sport;
380 security_skb_classify_flow(skb, &fl);
381
382 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
383 sk->sk_err_soft = -err;
384 goto out;
385 }
386
387 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
388 sk->sk_err_soft = -err;
389 goto out;
390 }
391
392 } else
393 dst_hold(dst);
394
395 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
396 tcp_sync_mss(sk, dst_mtu(dst));
397 tcp_simple_retransmit(sk);
398 } /* else let the usual retransmit timer handle it */
399 dst_release(dst);
400 goto out;
401 }
402
403 icmpv6_err_convert(type, code, &err);
404
405 /* Might be for a request_sock */
406 switch (sk->sk_state) {
407 struct request_sock *req, **prev;
408 case TCP_LISTEN:
409 if (sock_owned_by_user(sk))
410 goto out;
411
412 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
413 &hdr->saddr, inet6_iif(skb));
414 if (!req)
415 goto out;
416
417 /* ICMPs are not backlogged, hence we cannot get
418 * an established socket here.
419 */
420 BUG_TRAP(req->sk == NULL);
421
422 if (seq != tcp_rsk(req)->snt_isn) {
423 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
424 goto out;
425 }
426
427 inet_csk_reqsk_queue_drop(sk, req, prev);
428 goto out;
429
430 case TCP_SYN_SENT:
431 case TCP_SYN_RECV: /* Cannot happen.
432 It can, if SYNs are crossed. --ANK */
433 if (!sock_owned_by_user(sk)) {
434 sk->sk_err = err;
435 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
436
437 tcp_done(sk);
438 } else
439 sk->sk_err_soft = err;
440 goto out;
441 }
442
443 if (!sock_owned_by_user(sk) && np->recverr) {
444 sk->sk_err = err;
445 sk->sk_error_report(sk);
446 } else
447 sk->sk_err_soft = err;
448
449 out:
450 bh_unlock_sock(sk);
451 sock_put(sk);
452 }
453
454
455 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
456 {
457 struct inet6_request_sock *treq = inet6_rsk(req);
458 struct ipv6_pinfo *np = inet6_sk(sk);
459 struct sk_buff * skb;
460 struct ipv6_txoptions *opt = NULL;
461 struct in6_addr * final_p = NULL, final;
462 struct flowi fl;
463 struct dst_entry *dst;
464 int err = -1;
465
466 memset(&fl, 0, sizeof(fl));
467 fl.proto = IPPROTO_TCP;
468 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
469 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
470 fl.fl6_flowlabel = 0;
471 fl.oif = treq->iif;
472 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
473 fl.fl_ip_sport = inet_sk(sk)->sport;
474 security_req_classify_flow(req, &fl);
475
476 opt = np->opt;
477 if (opt && opt->srcrt) {
478 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
479 ipv6_addr_copy(&final, &fl.fl6_dst);
480 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
481 final_p = &final;
482 }
483
484 err = ip6_dst_lookup(sk, &dst, &fl);
485 if (err)
486 goto done;
487 if (final_p)
488 ipv6_addr_copy(&fl.fl6_dst, final_p);
489 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
490 goto done;
491
492 skb = tcp_make_synack(sk, dst, req);
493 if (skb) {
494 struct tcphdr *th = tcp_hdr(skb);
495
496 th->check = tcp_v6_check(th, skb->len,
497 &treq->loc_addr, &treq->rmt_addr,
498 csum_partial((char *)th, skb->len, skb->csum));
499
500 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
501 err = ip6_xmit(sk, skb, &fl, opt, 0);
502 err = net_xmit_eval(err);
503 }
504
505 done:
506 if (opt && opt != np->opt)
507 sock_kfree_s(sk, opt, opt->tot_len);
508 dst_release(dst);
509 return err;
510 }
511
512 static inline void syn_flood_warning(struct sk_buff *skb)
513 {
514 #ifdef CONFIG_SYN_COOKIES
515 if (sysctl_tcp_syncookies)
516 printk(KERN_INFO
517 "TCPv6: Possible SYN flooding on port %d. "
518 "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
519 else
520 #endif
521 printk(KERN_INFO
522 "TCPv6: Possible SYN flooding on port %d. "
523 "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
524 }
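
/* For reference: sysctl_tcp_syncookies is the knob shared between v4 and
 * v6, toggled from userspace with e.g.:
 *
 *	echo 1 > /proc/sys/net/ipv4/tcp_syncookies
 */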
525
526 static void tcp_v6_reqsk_destructor(struct request_sock *req)
527 {
528 if (inet6_rsk(req)->pktopts)
529 kfree_skb(inet6_rsk(req)->pktopts);
530 }
531
532 #ifdef CONFIG_TCP_MD5SIG
533 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
534 struct in6_addr *addr)
535 {
536 struct tcp_sock *tp = tcp_sk(sk);
537 int i;
538
539 BUG_ON(tp == NULL);
540
541 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
542 return NULL;
543
544 for (i = 0; i < tp->md5sig_info->entries6; i++) {
545 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, addr) == 0)
546 return &tp->md5sig_info->keys6[i].base;
547 }
548 return NULL;
549 }
550
551 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
552 struct sock *addr_sk)
553 {
554 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
555 }
556
557 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
558 struct request_sock *req)
559 {
560 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
561 }
562
563 static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
564 char *newkey, u8 newkeylen)
565 {
566 /* Add key to the list */
567 struct tcp_md5sig_key *key;
568 struct tcp_sock *tp = tcp_sk(sk);
569 struct tcp6_md5sig_key *keys;
570
571 key = tcp_v6_md5_do_lookup(sk, peer);
572 if (key) {
573 /* modify existing entry - just update that one */
574 kfree(key->key);
575 key->key = newkey;
576 key->keylen = newkeylen;
577 } else {
578 /* reallocate new list if current one is full. */
579 if (!tp->md5sig_info) {
580 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
581 if (!tp->md5sig_info) {
582 kfree(newkey);
583 return -ENOMEM;
584 }
585 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
586 }
587 if (tcp_alloc_md5sig_pool() == NULL) {
588 kfree(newkey);
589 return -ENOMEM;
590 }
591 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
592 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
593 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
594
595 if (!keys) {
596 tcp_free_md5sig_pool();
597 kfree(newkey);
598 return -ENOMEM;
599 }
600
601 if (tp->md5sig_info->entries6)
602 memmove(keys, tp->md5sig_info->keys6,
603 (sizeof (tp->md5sig_info->keys6[0]) *
604 tp->md5sig_info->entries6));
605
606 kfree(tp->md5sig_info->keys6);
607 tp->md5sig_info->keys6 = keys;
608 tp->md5sig_info->alloced6++;
609 }
610
611 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
612 peer);
613 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
614 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
615
616 tp->md5sig_info->entries6++;
617 }
618 return 0;
619 }
620
621 static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
622 u8 *newkey, __u8 newkeylen)
623 {
624 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
625 newkey, newkeylen);
626 }
627
628 static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
629 {
630 struct tcp_sock *tp = tcp_sk(sk);
631 int i;
632
633 for (i = 0; i < tp->md5sig_info->entries6; i++) {
634 if (ipv6_addr_cmp(&tp->md5sig_info->keys6[i].addr, peer) == 0) {
635 /* Free the key */
636 kfree(tp->md5sig_info->keys6[i].base.key);
637 tp->md5sig_info->entries6--;
638
639 if (tp->md5sig_info->entries6 == 0) {
640 kfree(tp->md5sig_info->keys6);
641 tp->md5sig_info->keys6 = NULL;
642 tp->md5sig_info->alloced6 = 0;
643 } else {
644 /* shrink the database */
645 if (tp->md5sig_info->entries6 != i)
646 memmove(&tp->md5sig_info->keys6[i],
647 &tp->md5sig_info->keys6[i+1],
648 (tp->md5sig_info->entries6 - i)
649 * sizeof (tp->md5sig_info->keys6[0]));
650 }
651 tcp_free_md5sig_pool();
652 return 0;
653 }
654 }
655 return -ENOENT;
656 }
657
658 static void tcp_v6_clear_md5_list (struct sock *sk)
659 {
660 struct tcp_sock *tp = tcp_sk(sk);
661 int i;
662
663 if (tp->md5sig_info->entries6) {
664 for (i = 0; i < tp->md5sig_info->entries6; i++)
665 kfree(tp->md5sig_info->keys6[i].base.key);
666 tp->md5sig_info->entries6 = 0;
667 tcp_free_md5sig_pool();
668 }
669
670 kfree(tp->md5sig_info->keys6);
671 tp->md5sig_info->keys6 = NULL;
672 tp->md5sig_info->alloced6 = 0;
673
674 if (tp->md5sig_info->entries4) {
675 for (i = 0; i < tp->md5sig_info->entries4; i++)
676 kfree(tp->md5sig_info->keys4[i].base.key);
677 tp->md5sig_info->entries4 = 0;
678 tcp_free_md5sig_pool();
679 }
680
681 kfree(tp->md5sig_info->keys4);
682 tp->md5sig_info->keys4 = NULL;
683 tp->md5sig_info->alloced4 = 0;
684 }
685
686 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
687 int optlen)
688 {
689 struct tcp_md5sig cmd;
690 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
691 u8 *newkey;
692
693 if (optlen < sizeof(cmd))
694 return -EINVAL;
695
696 if (copy_from_user(&cmd, optval, sizeof(cmd)))
697 return -EFAULT;
698
699 if (sin6->sin6_family != AF_INET6)
700 return -EINVAL;
701
702 if (!cmd.tcpm_keylen) {
703 if (!tcp_sk(sk)->md5sig_info)
704 return -ENOENT;
705 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
706 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
707 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
708 }
709
710 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
711 return -EINVAL;
712
713 if (!tcp_sk(sk)->md5sig_info) {
714 struct tcp_sock *tp = tcp_sk(sk);
715 struct tcp_md5sig_info *p;
716
717 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
718 if (!p)
719 return -ENOMEM;
720
721 tp->md5sig_info = p;
722 sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
723 }
724
725 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
726 if (!newkey)
727 return -ENOMEM;
728 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
729 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
730 newkey, cmd.tcpm_keylen);
731 }
732 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
733 }
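
/* Illustration, not part of this file: the setsockopt() call that lands in
 * tcp_v6_parse_md5_keys() above. A userspace sketch installing an MD5 key
 * for one IPv6 peer; the peer address and key are arbitrary examples, and
 * struct tcp_md5sig / TCP_MD5SIG are assumed to come from <linux/tcp.h>
 * (header availability varies by libc).
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *	#include <linux/tcp.h>
 *
 *	int add_md5_key(int fd)
 *	{
 *		struct tcp_md5sig md5;
 *		struct sockaddr_in6 *sin6 =
 *			(struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *		memset(&md5, 0, sizeof(md5));
 *		sin6->sin6_family = AF_INET6;
 *		inet_pton(AF_INET6, "2001:db8::1", &sin6->sin6_addr);
 *		md5.tcpm_keylen = 6;
 *		memcpy(md5.tcpm_key, "secret", md5.tcpm_keylen);
 *		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG,
 *				  &md5, sizeof(md5));
 *	}
 */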
734
735 static int tcp_v6_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
736 struct in6_addr *saddr,
737 struct in6_addr *daddr,
738 struct tcphdr *th, int protocol,
739 unsigned int tcplen)
740 {
741 struct scatterlist sg[4];
742 __u16 data_len;
743 int block = 0;
744 __sum16 cksum;
745 struct tcp_md5sig_pool *hp;
746 struct tcp6_pseudohdr *bp;
747 struct hash_desc *desc;
748 int err;
749 unsigned int nbytes = 0;
750
751 hp = tcp_get_md5sig_pool();
752 if (!hp) {
753 printk(KERN_WARNING "%s(): hash pool not found...\n", __func__);
754 goto clear_hash_noput;
755 }
756 bp = &hp->md5_blk.ip6;
757 desc = &hp->md5_desc;
758
759 /* 1. TCP pseudo-header (RFC2460) */
760 ipv6_addr_copy(&bp->saddr, saddr);
761 ipv6_addr_copy(&bp->daddr, daddr);
762 bp->len = htonl(tcplen);
763 bp->protocol = htonl(protocol);
764
765 sg_init_table(sg, 4);
766
767 sg_set_buf(&sg[block++], bp, sizeof(*bp));
768 nbytes += sizeof(*bp);
769
770 /* 2. TCP header, excluding options */
771 cksum = th->check;
772 th->check = 0;
773 sg_set_buf(&sg[block++], th, sizeof(*th));
774 nbytes += sizeof(*th);
775
776 /* 3. TCP segment data (if any) */
777 data_len = tcplen - (th->doff << 2);
778 if (data_len > 0) {
779 u8 *data = (u8 *)th + (th->doff << 2);
780 sg_set_buf(&sg[block++], data, data_len);
781 nbytes += data_len;
782 }
783
784 /* 4. shared key */
785 sg_set_buf(&sg[block++], key->key, key->keylen);
786 nbytes += key->keylen;
787
788 sg_mark_end(&sg[block - 1]);
789
790 /* Now store the hash into the packet */
791 err = crypto_hash_init(desc);
792 if (err) {
793 printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
794 goto clear_hash;
795 }
796 err = crypto_hash_update(desc, sg, nbytes);
797 if (err) {
798 printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
799 goto clear_hash;
800 }
801 err = crypto_hash_final(desc, md5_hash);
802 if (err) {
803 printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
804 goto clear_hash;
805 }
806
807 /* Reset header, and free up the crypto */
808 tcp_put_md5sig_pool();
809 th->check = cksum;
810 out:
811 return 0;
812 clear_hash:
813 tcp_put_md5sig_pool();
814 clear_hash_noput:
815 memset(md5_hash, 0, 16);
816 goto out;
817 }
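
/* For reference, the digest computed above follows RFC 2385: it covers,
 * in order, (1) the IPv6 pseudo-header, (2) the TCP header with its
 * checksum zeroed, options excluded, (3) the segment data, and (4) the
 * key itself. A sketch of the block hashed in step 1, assuming the
 * layout of the struct tcp6_pseudohdr reached via hp->md5_blk.ip6:
 *
 *	struct tcp6_pseudohdr_sketch {
 *		struct in6_addr	saddr;
 *		struct in6_addr	daddr;
 *		__be32		len;		// TCP length
 *		__be32		protocol;	// IPPROTO_TCP
 *	};
 */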
818
819 static int tcp_v6_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
820 struct sock *sk,
821 struct dst_entry *dst,
822 struct request_sock *req,
823 struct tcphdr *th, int protocol,
824 unsigned int tcplen)
825 {
826 struct in6_addr *saddr, *daddr;
827
828 if (sk) {
829 saddr = &inet6_sk(sk)->saddr;
830 daddr = &inet6_sk(sk)->daddr;
831 } else {
832 saddr = &inet6_rsk(req)->loc_addr;
833 daddr = &inet6_rsk(req)->rmt_addr;
834 }
835 return tcp_v6_do_calc_md5_hash(md5_hash, key,
836 saddr, daddr,
837 th, protocol, tcplen);
838 }
839
840 static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
841 {
842 __u8 *hash_location = NULL;
843 struct tcp_md5sig_key *hash_expected;
844 struct ipv6hdr *ip6h = ipv6_hdr(skb);
845 struct tcphdr *th = tcp_hdr(skb);
846 int length = (th->doff << 2) - sizeof (*th);
847 int genhash;
848 u8 *ptr;
849 u8 newhash[16];
850
851 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
852
853 /* If the TCP option is too short, we can short cut */
854 if (length < TCPOLEN_MD5SIG)
855 return hash_expected ? 1 : 0;
856
857 /* parse options */
858 ptr = (u8*)(th + 1);
859 while (length > 0) {
860 int opcode = *ptr++;
861 int opsize;
862
863 switch (opcode) {
864 case TCPOPT_EOL:
865 goto done_opts;
866 case TCPOPT_NOP:
867 length--;
868 continue;
869 default:
870 opsize = *ptr++;
871 if (opsize < 2 || opsize > length)
872 goto done_opts;
873 if (opcode == TCPOPT_MD5SIG) {
874 hash_location = ptr;
875 goto done_opts;
876 }
877 }
878 ptr += opsize - 2;
879 length -= opsize;
880 }
881
882 done_opts:
883 /* do we have a hash as expected? */
884 if (!hash_expected) {
885 if (!hash_location)
886 return 0;
887 if (net_ratelimit()) {
888 printk(KERN_INFO "MD5 Hash NOT expected but found "
889 "(" NIP6_FMT ", %u)->"
890 "(" NIP6_FMT ", %u)\n",
891 NIP6(ip6h->saddr), ntohs(th->source),
892 NIP6(ip6h->daddr), ntohs(th->dest));
893 }
894 return 1;
895 }
896
897 if (!hash_location) {
898 if (net_ratelimit()) {
899 printk(KERN_INFO "MD5 Hash expected but NOT found "
900 "(" NIP6_FMT ", %u)->"
901 "(" NIP6_FMT ", %u)\n",
902 NIP6(ip6h->saddr), ntohs(th->source),
903 NIP6(ip6h->daddr), ntohs(th->dest));
904 }
905 return 1;
906 }
907
908 /* check the signature */
909 genhash = tcp_v6_do_calc_md5_hash(newhash,
910 hash_expected,
911 &ip6h->saddr, &ip6h->daddr,
912 th, sk->sk_protocol,
913 skb->len);
914 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
915 if (net_ratelimit()) {
916 printk(KERN_INFO "MD5 Hash %s for "
917 "(" NIP6_FMT ", %u)->"
918 "(" NIP6_FMT ", %u)\n",
919 genhash ? "failed" : "mismatch",
920 NIP6(ip6h->saddr), ntohs(th->source),
921 NIP6(ip6h->daddr), ntohs(th->dest));
922 }
923 return 1;
924 }
925 return 0;
926 }
927 #endif
928
929 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
930 .family = AF_INET6,
931 .obj_size = sizeof(struct tcp6_request_sock),
932 .rtx_syn_ack = tcp_v6_send_synack,
933 .send_ack = tcp_v6_reqsk_send_ack,
934 .destructor = tcp_v6_reqsk_destructor,
935 .send_reset = tcp_v6_send_reset
936 };
937
938 #ifdef CONFIG_TCP_MD5SIG
939 static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
940 .md5_lookup = tcp_v6_reqsk_md5_lookup,
941 };
942 #endif
943
944 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
945 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
946 .twsk_unique = tcp_twsk_unique,
947 .twsk_destructor= tcp_twsk_destructor,
948 };
949
950 static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
951 {
952 struct ipv6_pinfo *np = inet6_sk(sk);
953 struct tcphdr *th = tcp_hdr(skb);
954
955 if (skb->ip_summed == CHECKSUM_PARTIAL) {
956 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
957 skb->csum_start = skb_transport_header(skb) - skb->head;
958 skb->csum_offset = offsetof(struct tcphdr, check);
959 } else {
960 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
961 csum_partial((char *)th, th->doff<<2,
962 skb->csum));
963 }
964 }
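
/* With CHECKSUM_PARTIAL above, only the pseudo-header sum is written; the
 * NIC (or skb_checksum_help() on the software fallback path) finishes the
 * job using csum_start/csum_offset. Roughly, a sketch of what that
 * completion step does:
 *
 *	int offset = skb->csum_start - skb_headroom(skb);
 *	__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);
 *	*(__sum16 *)(skb->data + offset + skb->csum_offset) =
 *		csum_fold(csum);
 */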
965
966 static int tcp_v6_gso_send_check(struct sk_buff *skb)
967 {
968 struct ipv6hdr *ipv6h;
969 struct tcphdr *th;
970
971 if (!pskb_may_pull(skb, sizeof(*th)))
972 return -EINVAL;
973
974 ipv6h = ipv6_hdr(skb);
975 th = tcp_hdr(skb);
976
977 th->check = 0;
978 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
979 IPPROTO_TCP, 0);
980 skb->csum_start = skb_transport_header(skb) - skb->head;
981 skb->csum_offset = offsetof(struct tcphdr, check);
982 skb->ip_summed = CHECKSUM_PARTIAL;
983 return 0;
984 }
985
986 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
987 {
988 struct tcphdr *th = tcp_hdr(skb), *t1;
989 struct sk_buff *buff;
990 struct flowi fl;
991 unsigned int tot_len = sizeof(*th);
992 #ifdef CONFIG_TCP_MD5SIG
993 struct tcp_md5sig_key *key;
994 #endif
995
996 if (th->rst)
997 return;
998
999 if (!ipv6_unicast_destination(skb))
1000 return;
1001
1002 #ifdef CONFIG_TCP_MD5SIG
1003 if (sk)
1004 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
1005 else
1006 key = NULL;
1007
1008 if (key)
1009 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1010 #endif
1011
1012 /*
1013 * We need to grab some memory, and put together an RST,
1014 * and then put it into the queue to be sent.
1015 */
1016
1017 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1018 GFP_ATOMIC);
1019 if (buff == NULL)
1020 return;
1021
1022 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1023
1024 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1025
1026 /* Swap the send and the receive. */
1027 memset(t1, 0, sizeof(*t1));
1028 t1->dest = th->source;
1029 t1->source = th->dest;
1030 t1->doff = tot_len / 4;
1031 t1->rst = 1;
1032
1033 if (th->ack) {
1034 t1->seq = th->ack_seq;
1035 } else {
1036 t1->ack = 1;
1037 t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
1038 + skb->len - (th->doff<<2));
1039 }
1040
1041 #ifdef CONFIG_TCP_MD5SIG
1042 if (key) {
1043 __be32 *opt = (__be32*)(t1 + 1);
1044 opt[0] = htonl((TCPOPT_NOP << 24) |
1045 (TCPOPT_NOP << 16) |
1046 (TCPOPT_MD5SIG << 8) |
1047 TCPOLEN_MD5SIG);
1048 tcp_v6_do_calc_md5_hash((__u8 *)&opt[1], key,
1049 &ipv6_hdr(skb)->daddr,
1050 &ipv6_hdr(skb)->saddr,
1051 t1, IPPROTO_TCP, tot_len);
1052 }
1053 #endif
1054
1055 buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);
1056
1057 memset(&fl, 0, sizeof(fl));
1058 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1059 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1060
1061 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1062 sizeof(*t1), IPPROTO_TCP,
1063 buff->csum);
1064
1065 fl.proto = IPPROTO_TCP;
1066 fl.oif = inet6_iif(skb);
1067 fl.fl_ip_dport = t1->dest;
1068 fl.fl_ip_sport = t1->source;
1069 security_skb_classify_flow(skb, &fl);
1070
1071 /* Pass a socket to ip6_dst_lookup even though it is for an RST;
1072 * the underlying function will use it to retrieve the network
1073 * namespace.
1074 */
1075 if (!ip6_dst_lookup(init_net.ipv6.tcp_sk, &buff->dst, &fl)) {
1076
1077 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1078 ip6_xmit(init_net.ipv6.tcp_sk,
1079 buff, &fl, NULL, 0);
1080 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1081 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
1082 return;
1083 }
1084 }
1085
1086 kfree_skb(buff);
1087 }
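
/* Note on namespaces: the stateless RST/ACK paths above still reference
 * init_net.ipv6.tcp_sk explicitly. Once all input devices are
 * namespace-aware, the control socket would instead be derived from the
 * packet itself; a hedged sketch, reusing the nd_net accessor already
 * used by tcp_v6_err() and tcp_v6_rcv():
 *
 *	struct net *net = skb->dev->nd_net;
 *	struct sock *ctl_sk = net->ipv6.tcp_sk;	// set in tcpv6_net_init()
 */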
1088
1089 static void tcp_v6_send_ack(struct tcp_timewait_sock *tw,
1090 struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
1091 {
1092 struct tcphdr *th = tcp_hdr(skb), *t1;
1093 struct sk_buff *buff;
1094 struct flowi fl;
1095 unsigned int tot_len = sizeof(struct tcphdr);
1096 __be32 *topt;
1097 #ifdef CONFIG_TCP_MD5SIG
1098 struct tcp_md5sig_key *key;
1099 struct tcp_md5sig_key tw_key;
1100 #endif
1101
1102 #ifdef CONFIG_TCP_MD5SIG
1103 if (!tw && skb->sk) {
1104 key = tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr);
1105 } else if (tw && tw->tw_md5_keylen) {
1106 tw_key.key = tw->tw_md5_key;
1107 tw_key.keylen = tw->tw_md5_keylen;
1108 key = &tw_key;
1109 } else {
1110 key = NULL;
1111 }
1112 #endif
1113
1114 if (ts)
1115 tot_len += TCPOLEN_TSTAMP_ALIGNED;
1116 #ifdef CONFIG_TCP_MD5SIG
1117 if (key)
1118 tot_len += TCPOLEN_MD5SIG_ALIGNED;
1119 #endif
1120
1121 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1122 GFP_ATOMIC);
1123 if (buff == NULL)
1124 return;
1125
1126 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1127
1128 t1 = (struct tcphdr *) skb_push(buff,tot_len);
1129
1130 /* Swap the send and the receive. */
1131 memset(t1, 0, sizeof(*t1));
1132 t1->dest = th->source;
1133 t1->source = th->dest;
1134 t1->doff = tot_len/4;
1135 t1->seq = htonl(seq);
1136 t1->ack_seq = htonl(ack);
1137 t1->ack = 1;
1138 t1->window = htons(win);
1139
1140 topt = (__be32 *)(t1 + 1);
1141
1142 if (ts) {
1143 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1144 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1145 *topt++ = htonl(tcp_time_stamp);
1146 *topt = htonl(ts);
1147 }
1148
1149 #ifdef CONFIG_TCP_MD5SIG
1150 if (key) {
1151 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1152 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1153 tcp_v6_do_calc_md5_hash((__u8 *)topt, key,
1154 &ipv6_hdr(skb)->daddr,
1155 &ipv6_hdr(skb)->saddr,
1156 t1, IPPROTO_TCP, tot_len);
1157 }
1158 #endif
1159
1160 buff->csum = csum_partial((char *)t1, tot_len, 0);
1161
1162 memset(&fl, 0, sizeof(fl));
1163 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1164 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1165
1166 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
1167 tot_len, IPPROTO_TCP,
1168 buff->csum);
1169
1170 fl.proto = IPPROTO_TCP;
1171 fl.oif = inet6_iif(skb);
1172 fl.fl_ip_dport = t1->dest;
1173 fl.fl_ip_sport = t1->source;
1174 security_skb_classify_flow(skb, &fl);
1175
1176 if (!ip6_dst_lookup(init_net.ipv6.tcp_sk, &buff->dst, &fl)) {
1177 if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
1178 ip6_xmit(init_net.ipv6.tcp_sk,
1179 buff, &fl, NULL, 0);
1180 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
1181 return;
1182 }
1183 }
1184
1185 kfree_skb(buff);
1186 }
1187
1188 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1189 {
1190 struct inet_timewait_sock *tw = inet_twsk(sk);
1191 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1192
1193 tcp_v6_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1194 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1195 tcptw->tw_ts_recent);
1196
1197 inet_twsk_put(tw);
1198 }
1199
1200 static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
1201 {
1202 tcp_v6_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent);
1203 }
1204
1205
1206 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1207 {
1208 struct request_sock *req, **prev;
1209 const struct tcphdr *th = tcp_hdr(skb);
1210 struct sock *nsk;
1211
1212 /* Find possible connection requests. */
1213 req = inet6_csk_search_req(sk, &prev, th->source,
1214 &ipv6_hdr(skb)->saddr,
1215 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1216 if (req)
1217 return tcp_check_req(sk, skb, req, prev);
1218
1219 nsk = __inet6_lookup_established(sk->sk_net, &tcp_hashinfo,
1220 &ipv6_hdr(skb)->saddr, th->source,
1221 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1222
1223 if (nsk) {
1224 if (nsk->sk_state != TCP_TIME_WAIT) {
1225 bh_lock_sock(nsk);
1226 return nsk;
1227 }
1228 inet_twsk_put(inet_twsk(nsk));
1229 return NULL;
1230 }
1231
1232 #ifdef CONFIG_SYN_COOKIES
1233 if (!th->rst && !th->syn && th->ack)
1234 sk = cookie_v6_check(sk, skb);
1235 #endif
1236 return sk;
1237 }
1238
1239 /* FIXME: this is substantially similar to the ipv4 code.
1240 * Can some kind of merge be done? -- erics
1241 */
1242 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1243 {
1244 struct inet6_request_sock *treq;
1245 struct ipv6_pinfo *np = inet6_sk(sk);
1246 struct tcp_options_received tmp_opt;
1247 struct tcp_sock *tp = tcp_sk(sk);
1248 struct request_sock *req = NULL;
1249 __u32 isn = TCP_SKB_CB(skb)->when;
1250 #ifdef CONFIG_SYN_COOKIES
1251 int want_cookie = 0;
1252 #else
1253 #define want_cookie 0
1254 #endif
1255
1256 if (skb->protocol == htons(ETH_P_IP))
1257 return tcp_v4_conn_request(sk, skb);
1258
1259 if (!ipv6_unicast_destination(skb))
1260 goto drop;
1261
1262 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1263 if (net_ratelimit())
1264 syn_flood_warning(skb);
1265 #ifdef CONFIG_SYN_COOKIES
1266 if (sysctl_tcp_syncookies)
1267 want_cookie = 1;
1268 else
1269 #endif
1270 goto drop;
1271 }
1272
1273 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1274 goto drop;
1275
1276 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1277 if (req == NULL)
1278 goto drop;
1279
1280 #ifdef CONFIG_TCP_MD5SIG
1281 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1282 #endif
1283
1284 tcp_clear_options(&tmp_opt);
1285 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1286 tmp_opt.user_mss = tp->rx_opt.user_mss;
1287
1288 tcp_parse_options(skb, &tmp_opt, 0);
1289
1290 if (want_cookie) {
1291 tcp_clear_options(&tmp_opt);
1292 tmp_opt.saw_tstamp = 0;
1293 }
1294
1295 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1296 tcp_openreq_init(req, &tmp_opt, skb);
1297
1298 treq = inet6_rsk(req);
1299 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1300 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1301 treq->pktopts = NULL;
1302 if (!want_cookie)
1303 TCP_ECN_create_request(req, tcp_hdr(skb));
1304
1305 if (want_cookie) {
1306 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1307 } else if (!isn) {
1308 if (ipv6_opt_accepted(sk, skb) ||
1309 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1310 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1311 atomic_inc(&skb->users);
1312 treq->pktopts = skb;
1313 }
1314 treq->iif = sk->sk_bound_dev_if;
1315
1316 /* So that link locals have meaning */
1317 if (!sk->sk_bound_dev_if &&
1318 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1319 treq->iif = inet6_iif(skb);
1320
1321 isn = tcp_v6_init_sequence(skb);
1322 }
1323
1324 tcp_rsk(req)->snt_isn = isn;
1325
1326 security_inet_conn_request(sk, skb, req);
1327
1328 if (tcp_v6_send_synack(sk, req))
1329 goto drop;
1330
1331 if (!want_cookie) {
1332 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1333 return 0;
1334 }
1335
1336 drop:
1337 if (req)
1338 reqsk_free(req);
1339
1340 return 0; /* don't send reset */
1341 }
1342
1343 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1344 struct request_sock *req,
1345 struct dst_entry *dst)
1346 {
1347 struct inet6_request_sock *treq = inet6_rsk(req);
1348 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1349 struct tcp6_sock *newtcp6sk;
1350 struct inet_sock *newinet;
1351 struct tcp_sock *newtp;
1352 struct sock *newsk;
1353 struct ipv6_txoptions *opt;
1354 #ifdef CONFIG_TCP_MD5SIG
1355 struct tcp_md5sig_key *key;
1356 #endif
1357
1358 if (skb->protocol == htons(ETH_P_IP)) {
1359 /*
1360 * v6 mapped
1361 */
1362
1363 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1364
1365 if (newsk == NULL)
1366 return NULL;
1367
1368 newtcp6sk = (struct tcp6_sock *)newsk;
1369 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1370
1371 newinet = inet_sk(newsk);
1372 newnp = inet6_sk(newsk);
1373 newtp = tcp_sk(newsk);
1374
1375 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1376
1377 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
1378 newinet->daddr);
1379
1380 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
1381 newinet->saddr);
1382
1383 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1384
1385 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1386 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1387 #ifdef CONFIG_TCP_MD5SIG
1388 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1389 #endif
1390
1391 newnp->pktoptions = NULL;
1392 newnp->opt = NULL;
1393 newnp->mcast_oif = inet6_iif(skb);
1394 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1395
1396 /*
1397 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1398 * here, tcp_create_openreq_child now does this for us, see the comment in
1399 * that function for the gory details. -acme
1400 */
1401
1402 /* This is a tricky place. Until this moment the IPv4 tcp code
1403 worked with the IPv6 icsk.icsk_af_ops.
1404 Sync it now.
1405 */
1406 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1407
1408 return newsk;
1409 }
1410
1411 opt = np->opt;
1412
1413 if (sk_acceptq_is_full(sk))
1414 goto out_overflow;
1415
1416 if (dst == NULL) {
1417 struct in6_addr *final_p = NULL, final;
1418 struct flowi fl;
1419
1420 memset(&fl, 0, sizeof(fl));
1421 fl.proto = IPPROTO_TCP;
1422 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1423 if (opt && opt->srcrt) {
1424 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1425 ipv6_addr_copy(&final, &fl.fl6_dst);
1426 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1427 final_p = &final;
1428 }
1429 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1430 fl.oif = sk->sk_bound_dev_if;
1431 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1432 fl.fl_ip_sport = inet_sk(sk)->sport;
1433 security_req_classify_flow(req, &fl);
1434
1435 if (ip6_dst_lookup(sk, &dst, &fl))
1436 goto out;
1437
1438 if (final_p)
1439 ipv6_addr_copy(&fl.fl6_dst, final_p);
1440
1441 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1442 goto out;
1443 }
1444
1445 newsk = tcp_create_openreq_child(sk, req, skb);
1446 if (newsk == NULL)
1447 goto out;
1448
1449 /*
1450 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1451 * count here, tcp_create_openreq_child now does this for us, see the
1452 * comment in that function for the gory details. -acme
1453 */
1454
1455 newsk->sk_gso_type = SKB_GSO_TCPV6;
1456 __ip6_dst_store(newsk, dst, NULL, NULL);
1457
1458 newtcp6sk = (struct tcp6_sock *)newsk;
1459 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1460
1461 newtp = tcp_sk(newsk);
1462 newinet = inet_sk(newsk);
1463 newnp = inet6_sk(newsk);
1464
1465 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1466
1467 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1468 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1469 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1470 newsk->sk_bound_dev_if = treq->iif;
1471
1472 /* Now IPv6 options...
1473
1474 First: no IPv4 options.
1475 */
1476 newinet->opt = NULL;
1477 newnp->ipv6_fl_list = NULL;
1478
1479 /* Clone RX bits */
1480 newnp->rxopt.all = np->rxopt.all;
1481
1482 /* Clone pktoptions received with SYN */
1483 newnp->pktoptions = NULL;
1484 if (treq->pktopts != NULL) {
1485 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1486 kfree_skb(treq->pktopts);
1487 treq->pktopts = NULL;
1488 if (newnp->pktoptions)
1489 skb_set_owner_r(newnp->pktoptions, newsk);
1490 }
1491 newnp->opt = NULL;
1492 newnp->mcast_oif = inet6_iif(skb);
1493 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1494
1495 /* Clone native IPv6 options from listening socket (if any)
1496
1497 Yes, keeping a reference count would be much more clever,
1498 but we do one more thing here: reattach optmem
1499 to newsk.
1500 */
1501 if (opt) {
1502 newnp->opt = ipv6_dup_options(newsk, opt);
1503 if (opt != np->opt)
1504 sock_kfree_s(sk, opt, opt->tot_len);
1505 }
1506
1507 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1508 if (newnp->opt)
1509 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1510 newnp->opt->opt_flen);
1511
1512 tcp_mtup_init(newsk);
1513 tcp_sync_mss(newsk, dst_mtu(dst));
1514 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1515 tcp_initialize_rcv_mss(newsk);
1516
1517 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1518
1519 #ifdef CONFIG_TCP_MD5SIG
1520 /* Copy over the MD5 key from the original socket */
1521 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1522 /* We're using one, so create a matching key
1523 * on the newsk structure. If we fail to get
1524 * memory, then we end up not copying the key
1525 * across. Shucks.
1526 */
1527 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1528 if (newkey != NULL)
1529 tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
1530 newkey, key->keylen);
1531 }
1532 #endif
1533
1534 __inet6_hash(newsk);
1535 inet_inherit_port(sk, newsk);
1536
1537 return newsk;
1538
1539 out_overflow:
1540 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1541 out:
1542 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1543 if (opt && opt != np->opt)
1544 sock_kfree_s(sk, opt, opt->tot_len);
1545 dst_release(dst);
1546 return NULL;
1547 }
1548
1549 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1550 {
1551 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1552 if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
1553 &ipv6_hdr(skb)->daddr, skb->csum)) {
1554 skb->ip_summed = CHECKSUM_UNNECESSARY;
1555 return 0;
1556 }
1557 }
1558
1559 skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
1560 &ipv6_hdr(skb)->saddr,
1561 &ipv6_hdr(skb)->daddr, 0));
1562
1563 if (skb->len <= 76) {
1564 return __skb_checksum_complete(skb);
1565 }
1566 return 0;
1567 }
1568
1569 /* The socket must have its spinlock held when we get
1570 * here.
1571 *
1572 * We have a potential double-lock case here, so even when
1573 * doing backlog processing we use the BH locking scheme.
1574 * This is because we cannot sleep with the original spinlock
1575 * held.
1576 */
1577 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1578 {
1579 struct ipv6_pinfo *np = inet6_sk(sk);
1580 struct tcp_sock *tp;
1581 struct sk_buff *opt_skb = NULL;
1582
1583 /* Imagine: socket is IPv6. IPv4 packet arrives,
1584 goes to the IPv4 receive handler and is backlogged.
1585 From the backlog it always comes here. Kerboom...
1586 Fortunately, tcp_rcv_established and rcv_established
1587 handle them correctly, but that is not the case with
1588 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1589 */
1590
1591 if (skb->protocol == htons(ETH_P_IP))
1592 return tcp_v4_do_rcv(sk, skb);
1593
1594 #ifdef CONFIG_TCP_MD5SIG
1595 if (tcp_v6_inbound_md5_hash (sk, skb))
1596 goto discard;
1597 #endif
1598
1599 if (sk_filter(sk, skb))
1600 goto discard;
1601
1602 /*
1603 * socket locking is here for SMP purposes as backlog rcv
1604 * is currently called with bh processing disabled.
1605 */
1606
1607 /* Do Stevens' IPV6_PKTOPTIONS.
1608
1609 Yes, guys, it is the only place in our code where we
1610 may do this without affecting IPv4.
1611 The rest of the code is protocol independent,
1612 and I do not like the idea of uglifying IPv4.
1613
1614 Actually, the whole idea behind IPV6_PKTOPTIONS
1615 looks not very well thought out. For now we latch
1616 the options received in the last packet, enqueued
1617 by tcp. Feel free to propose a better solution.
1618 --ANK (980728)
1619 */
1620 if (np->rxopt.all)
1621 opt_skb = skb_clone(skb, GFP_ATOMIC);
1622
1623 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1624 TCP_CHECK_TIMER(sk);
1625 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1626 goto reset;
1627 TCP_CHECK_TIMER(sk);
1628 if (opt_skb)
1629 goto ipv6_pktoptions;
1630 return 0;
1631 }
1632
1633 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1634 goto csum_err;
1635
1636 if (sk->sk_state == TCP_LISTEN) {
1637 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1638 if (!nsk)
1639 goto discard;
1640
1641 /*
1642 * Queue it on the new socket if the new socket is active,
1643 * otherwise we just short-circuit this and continue with
1644 * the new socket.
1645 */
1646 if (nsk != sk) {
1647 if (tcp_child_process(sk, nsk, skb))
1648 goto reset;
1649 if (opt_skb)
1650 __kfree_skb(opt_skb);
1651 return 0;
1652 }
1653 }
1654
1655 TCP_CHECK_TIMER(sk);
1656 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1657 goto reset;
1658 TCP_CHECK_TIMER(sk);
1659 if (opt_skb)
1660 goto ipv6_pktoptions;
1661 return 0;
1662
1663 reset:
1664 tcp_v6_send_reset(sk, skb);
1665 discard:
1666 if (opt_skb)
1667 __kfree_skb(opt_skb);
1668 kfree_skb(skb);
1669 return 0;
1670 csum_err:
1671 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1672 goto discard;
1673
1674
1675 ipv6_pktoptions:
1676 /* You may ask: what is this?
1677
1678 1. skb was enqueued by tcp.
1679 2. skb was added to the tail of the read queue, rather than out of order.
1680 3. socket is not in passive state.
1681 4. Finally, it really contains options which the user wants to receive.
1682 */
1683 tp = tcp_sk(sk);
1684 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1685 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1686 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1687 np->mcast_oif = inet6_iif(opt_skb);
1688 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1689 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1690 if (ipv6_opt_accepted(sk, opt_skb)) {
1691 skb_set_owner_r(opt_skb, sk);
1692 opt_skb = xchg(&np->pktoptions, opt_skb);
1693 } else {
1694 __kfree_skb(opt_skb);
1695 opt_skb = xchg(&np->pktoptions, NULL);
1696 }
1697 }
1698
1699 if (opt_skb)
1700 kfree_skb(opt_skb);
1701 return 0;
1702 }
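
/* Illustration, not part of this file: userspace retrieves the options
 * latched into np->pktoptions above with getsockopt(IPV6_PKTOPTIONS),
 * which returns them as a block of cmsgs. A minimal sketch (assuming
 * the IPV6_PKTOPTIONS getsockopt path for TCP sockets):
 *
 *	char cbuf[256];
 *	socklen_t clen = sizeof(cbuf);
 *
 *	if (getsockopt(fd, IPPROTO_IPV6, IPV6_PKTOPTIONS,
 *		       cbuf, &clen) == 0) {
 *		// walk cbuf as a sequence of struct cmsghdr records
 *	}
 */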
1703
1704 static int tcp_v6_rcv(struct sk_buff *skb)
1705 {
1706 struct tcphdr *th;
1707 struct sock *sk;
1708 int ret;
1709
1710 if (skb->pkt_type != PACKET_HOST)
1711 goto discard_it;
1712
1713 /*
1714 * Count it even if it's bad.
1715 */
1716 TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1717
1718 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1719 goto discard_it;
1720
1721 th = tcp_hdr(skb);
1722
1723 if (th->doff < sizeof(struct tcphdr)/4)
1724 goto bad_packet;
1725 if (!pskb_may_pull(skb, th->doff*4))
1726 goto discard_it;
1727
1728 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1729 goto bad_packet;
1730
1731 th = tcp_hdr(skb);
1732 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1733 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1734 skb->len - th->doff*4);
1735 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1736 TCP_SKB_CB(skb)->when = 0;
1737 TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
1738 TCP_SKB_CB(skb)->sacked = 0;
1739
1740 sk = __inet6_lookup(skb->dev->nd_net, &tcp_hashinfo,
1741 &ipv6_hdr(skb)->saddr, th->source,
1742 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
1743 inet6_iif(skb));
1744
1745 if (!sk)
1746 goto no_tcp_socket;
1747
1748 process:
1749 if (sk->sk_state == TCP_TIME_WAIT)
1750 goto do_time_wait;
1751
1752 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1753 goto discard_and_relse;
1754
1755 if (sk_filter(sk, skb))
1756 goto discard_and_relse;
1757
1758 skb->dev = NULL;
1759
1760 bh_lock_sock_nested(sk);
1761 ret = 0;
1762 if (!sock_owned_by_user(sk)) {
1763 #ifdef CONFIG_NET_DMA
1764 struct tcp_sock *tp = tcp_sk(sk);
1765 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1766 tp->ucopy.dma_chan = get_softnet_dma();
1767 if (tp->ucopy.dma_chan)
1768 ret = tcp_v6_do_rcv(sk, skb);
1769 else
1770 #endif
1771 {
1772 if (!tcp_prequeue(sk, skb))
1773 ret = tcp_v6_do_rcv(sk, skb);
1774 }
1775 } else
1776 sk_add_backlog(sk, skb);
1777 bh_unlock_sock(sk);
1778
1779 sock_put(sk);
1780 return ret ? -1 : 0;
1781
1782 no_tcp_socket:
1783 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1784 goto discard_it;
1785
1786 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1787 bad_packet:
1788 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1789 } else {
1790 tcp_v6_send_reset(NULL, skb);
1791 }
1792
1793 discard_it:
1794
1795 /*
1796 * Discard frame
1797 */
1798
1799 kfree_skb(skb);
1800 return 0;
1801
1802 discard_and_relse:
1803 sock_put(sk);
1804 goto discard_it;
1805
1806 do_time_wait:
1807 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1808 inet_twsk_put(inet_twsk(sk));
1809 goto discard_it;
1810 }
1811
1812 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1813 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1814 inet_twsk_put(inet_twsk(sk));
1815 goto discard_it;
1816 }
1817
1818 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1819 case TCP_TW_SYN:
1820 {
1821 struct sock *sk2;
1822
1823 sk2 = inet6_lookup_listener(skb->dev->nd_net, &tcp_hashinfo,
1824 &ipv6_hdr(skb)->daddr,
1825 ntohs(th->dest), inet6_iif(skb));
1826 if (sk2 != NULL) {
1827 struct inet_timewait_sock *tw = inet_twsk(sk);
1828 inet_twsk_deschedule(tw, &tcp_death_row);
1829 inet_twsk_put(tw);
1830 sk = sk2;
1831 goto process;
1832 }
1833 /* Fall through to ACK */
1834 }
1835 case TCP_TW_ACK:
1836 tcp_v6_timewait_ack(sk, skb);
1837 break;
1838 case TCP_TW_RST:
1839 goto no_tcp_socket;
1840 case TCP_TW_SUCCESS:;
1841 }
1842 goto discard_it;
1843 }
1844
1845 static int tcp_v6_remember_stamp(struct sock *sk)
1846 {
1847 /* Alas, not yet... */
1848 return 0;
1849 }
1850
1851 static struct inet_connection_sock_af_ops ipv6_specific = {
1852 .queue_xmit = inet6_csk_xmit,
1853 .send_check = tcp_v6_send_check,
1854 .rebuild_header = inet6_sk_rebuild_header,
1855 .conn_request = tcp_v6_conn_request,
1856 .syn_recv_sock = tcp_v6_syn_recv_sock,
1857 .remember_stamp = tcp_v6_remember_stamp,
1858 .net_header_len = sizeof(struct ipv6hdr),
1859 .setsockopt = ipv6_setsockopt,
1860 .getsockopt = ipv6_getsockopt,
1861 .addr2sockaddr = inet6_csk_addr2sockaddr,
1862 .sockaddr_len = sizeof(struct sockaddr_in6),
1863 .bind_conflict = inet6_csk_bind_conflict,
1864 #ifdef CONFIG_COMPAT
1865 .compat_setsockopt = compat_ipv6_setsockopt,
1866 .compat_getsockopt = compat_ipv6_getsockopt,
1867 #endif
1868 };
1869
1870 #ifdef CONFIG_TCP_MD5SIG
1871 static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1872 .md5_lookup = tcp_v6_md5_lookup,
1873 .calc_md5_hash = tcp_v6_calc_md5_hash,
1874 .md5_add = tcp_v6_md5_add_func,
1875 .md5_parse = tcp_v6_parse_md5_keys,
1876 };
1877 #endif
1878
1879 /*
1880 * TCP over IPv4 via INET6 API
1881 */
1882
1883 static struct inet_connection_sock_af_ops ipv6_mapped = {
1884 .queue_xmit = ip_queue_xmit,
1885 .send_check = tcp_v4_send_check,
1886 .rebuild_header = inet_sk_rebuild_header,
1887 .conn_request = tcp_v6_conn_request,
1888 .syn_recv_sock = tcp_v6_syn_recv_sock,
1889 .remember_stamp = tcp_v4_remember_stamp,
1890 .net_header_len = sizeof(struct iphdr),
1891 .setsockopt = ipv6_setsockopt,
1892 .getsockopt = ipv6_getsockopt,
1893 .addr2sockaddr = inet6_csk_addr2sockaddr,
1894 .sockaddr_len = sizeof(struct sockaddr_in6),
1895 .bind_conflict = inet6_csk_bind_conflict,
1896 #ifdef CONFIG_COMPAT
1897 .compat_setsockopt = compat_ipv6_setsockopt,
1898 .compat_getsockopt = compat_ipv6_getsockopt,
1899 #endif
1900 };
1901
1902 #ifdef CONFIG_TCP_MD5SIG
1903 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1904 .md5_lookup = tcp_v4_md5_lookup,
1905 .calc_md5_hash = tcp_v4_calc_md5_hash,
1906 .md5_add = tcp_v6_md5_add_func,
1907 .md5_parse = tcp_v6_parse_md5_keys,
1908 };
1909 #endif
1910
1911 /* NOTE: A lot of things are set to zero explicitly by the call to
1912 * sk_alloc(), so they need not be done here.
1913 */
1914 static int tcp_v6_init_sock(struct sock *sk)
1915 {
1916 struct inet_connection_sock *icsk = inet_csk(sk);
1917 struct tcp_sock *tp = tcp_sk(sk);
1918
1919 skb_queue_head_init(&tp->out_of_order_queue);
1920 tcp_init_xmit_timers(sk);
1921 tcp_prequeue_init(tp);
1922
1923 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1924 tp->mdev = TCP_TIMEOUT_INIT;
1925
1926 /* So many TCP implementations out there (incorrectly) count the
1927 * initial SYN frame in their delayed-ACK and congestion control
1928 * algorithms that we must have the following bandaid to talk
1929 * efficiently to them. -DaveM
1930 */
1931 tp->snd_cwnd = 2;
1932
1933 /* See draft-stevens-tcpca-spec-01 for discussion of the
1934 * initialization of these values.
1935 */
1936 tp->snd_ssthresh = 0x7fffffff;
1937 tp->snd_cwnd_clamp = ~0;
1938 tp->mss_cache = 536;
1939
1940 tp->reordering = sysctl_tcp_reordering;
1941
1942 sk->sk_state = TCP_CLOSE;
1943
1944 icsk->icsk_af_ops = &ipv6_specific;
1945 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1946 icsk->icsk_sync_mss = tcp_sync_mss;
1947 sk->sk_write_space = sk_stream_write_space;
1948 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1949
1950 #ifdef CONFIG_TCP_MD5SIG
1951 tp->af_specific = &tcp_sock_ipv6_specific;
1952 #endif
1953
1954 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1955 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1956
1957 atomic_inc(&tcp_sockets_allocated);
1958
1959 return 0;
1960 }
1961
1962 static int tcp_v6_destroy_sock(struct sock *sk)
1963 {
1964 #ifdef CONFIG_TCP_MD5SIG
1965 /* Clean up the MD5 key list */
1966 if (tcp_sk(sk)->md5sig_info)
1967 tcp_v6_clear_md5_list(sk);
1968 #endif
1969 tcp_v4_destroy_sock(sk);
1970 return inet6_destroy_sock(sk);
1971 }
1972
1973 #ifdef CONFIG_PROC_FS
1974 /* Proc filesystem TCPv6 sock list dumping. */
1975 static void get_openreq6(struct seq_file *seq,
1976 struct sock *sk, struct request_sock *req, int i, int uid)
1977 {
1978 int ttd = req->expires - jiffies;
1979 struct in6_addr *src = &inet6_rsk(req)->loc_addr;
1980 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
1981
1982 if (ttd < 0)
1983 ttd = 0;
1984
1985 seq_printf(seq,
1986 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1987 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1988 i,
1989 src->s6_addr32[0], src->s6_addr32[1],
1990 src->s6_addr32[2], src->s6_addr32[3],
1991 ntohs(inet_sk(sk)->sport),
1992 dest->s6_addr32[0], dest->s6_addr32[1],
1993 dest->s6_addr32[2], dest->s6_addr32[3],
1994 ntohs(inet_rsk(req)->rmt_port),
1995 TCP_SYN_RECV,
1996 0,0, /* could print option size, but that is af dependent. */
1997 1, /* timers active (only the expire timer) */
1998 jiffies_to_clock_t(ttd),
1999 req->retrans,
2000 uid,
2001 0, /* non standard timer */
2002 0, /* open_requests have no inode */
2003 0, req);
2004 }
2005
2006 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2007 {
2008 struct in6_addr *dest, *src;
2009 __u16 destp, srcp;
2010 int timer_active;
2011 unsigned long timer_expires;
2012 struct inet_sock *inet = inet_sk(sp);
2013 struct tcp_sock *tp = tcp_sk(sp);
2014 const struct inet_connection_sock *icsk = inet_csk(sp);
2015 struct ipv6_pinfo *np = inet6_sk(sp);
2016
2017 dest = &np->daddr;
2018 src = &np->rcv_saddr;
2019 destp = ntohs(inet->dport);
2020 srcp = ntohs(inet->sport);
2021
2022 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2023 timer_active = 1;
2024 timer_expires = icsk->icsk_timeout;
2025 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2026 timer_active = 4;
2027 timer_expires = icsk->icsk_timeout;
2028 } else if (timer_pending(&sp->sk_timer)) {
2029 timer_active = 2;
2030 timer_expires = sp->sk_timer.expires;
2031 } else {
2032 timer_active = 0;
2033 timer_expires = jiffies;
2034 }
2035
2036 seq_printf(seq,
2037 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2038 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
2039 i,
2040 src->s6_addr32[0], src->s6_addr32[1],
2041 src->s6_addr32[2], src->s6_addr32[3], srcp,
2042 dest->s6_addr32[0], dest->s6_addr32[1],
2043 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2044 sp->sk_state,
2045 tp->write_seq-tp->snd_una,
2046 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2047 timer_active,
2048 jiffies_to_clock_t(timer_expires - jiffies),
2049 icsk->icsk_retransmits,
2050 sock_i_uid(sp),
2051 icsk->icsk_probes_out,
2052 sock_i_ino(sp),
2053 atomic_read(&sp->sk_refcnt), sp,
2054 icsk->icsk_rto,
2055 icsk->icsk_ack.ato,
2056 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
2057 tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
2058 );
2059 }
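
/* Illustrative /proc/net/tcp6 line produced by the format above, for a
 * socket listening on [::1]:22. Addresses print as four native-endian
 * 32-bit words, so this example assumes a little-endian machine; state
 * 0A is TCP_LISTEN, and the trailing fields are truncated here:
 *
 *	0: 00000000000000000000000001000000:0016 00000000000000000000000000000000:0000 0A 00000000:00000000 00:00000000 00000000     0        0 ...
 */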
2060
2061 static void get_timewait6_sock(struct seq_file *seq,
2062 struct inet_timewait_sock *tw, int i)
2063 {
2064 struct in6_addr *dest, *src;
2065 __u16 destp, srcp;
2066 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2067 int ttd = tw->tw_ttd - jiffies;
2068
2069 if (ttd < 0)
2070 ttd = 0;
2071
2072 dest = &tw6->tw_v6_daddr;
2073 src = &tw6->tw_v6_rcv_saddr;
2074 destp = ntohs(tw->tw_dport);
2075 srcp = ntohs(tw->tw_sport);
2076
2077 seq_printf(seq,
2078 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2079 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2080 i,
2081 src->s6_addr32[0], src->s6_addr32[1],
2082 src->s6_addr32[2], src->s6_addr32[3], srcp,
2083 dest->s6_addr32[0], dest->s6_addr32[1],
2084 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2085 tw->tw_substate, 0, 0,
2086 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2087 atomic_read(&tw->tw_refcnt), tw);
2088 }
2089
2090 static int tcp6_seq_show(struct seq_file *seq, void *v)
2091 {
2092 struct tcp_iter_state *st;
2093
2094 if (v == SEQ_START_TOKEN) {
2095 seq_puts(seq,
2096 " sl "
2097 "local_address "
2098 "remote_address "
2099 "st tx_queue rx_queue tr tm->when retrnsmt"
2100 " uid timeout inode\n");
2101 goto out;
2102 }
2103 st = seq->private;
2104
2105 switch (st->state) {
2106 case TCP_SEQ_STATE_LISTENING:
2107 case TCP_SEQ_STATE_ESTABLISHED:
2108 get_tcp6_sock(seq, v, st->num);
2109 break;
2110 case TCP_SEQ_STATE_OPENREQ:
2111 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2112 break;
2113 case TCP_SEQ_STATE_TIME_WAIT:
2114 get_timewait6_sock(seq, v, st->num);
2115 break;
2116 }
2117 out:
2118 return 0;
2119 }
2120
2121 static struct file_operations tcp6_seq_fops;
2122 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2123 .owner = THIS_MODULE,
2124 .name = "tcp6",
2125 .family = AF_INET6,
2126 .seq_show = tcp6_seq_show,
2127 .seq_fops = &tcp6_seq_fops,
2128 };
2129
2130 int __init tcp6_proc_init(void)
2131 {
2132 return tcp_proc_register(&tcp6_seq_afinfo);
2133 }
2134
2135 void tcp6_proc_exit(void)
2136 {
2137 tcp_proc_unregister(&tcp6_seq_afinfo);
2138 }
2139 #endif
2140
2141 DEFINE_PROTO_INUSE(tcpv6)
2142
2143 struct proto tcpv6_prot = {
2144 .name = "TCPv6",
2145 .owner = THIS_MODULE,
2146 .close = tcp_close,
2147 .connect = tcp_v6_connect,
2148 .disconnect = tcp_disconnect,
2149 .accept = inet_csk_accept,
2150 .ioctl = tcp_ioctl,
2151 .init = tcp_v6_init_sock,
2152 .destroy = tcp_v6_destroy_sock,
2153 .shutdown = tcp_shutdown,
2154 .setsockopt = tcp_setsockopt,
2155 .getsockopt = tcp_getsockopt,
2156 .recvmsg = tcp_recvmsg,
2157 .backlog_rcv = tcp_v6_do_rcv,
2158 .hash = tcp_v6_hash,
2159 .unhash = inet_unhash,
2160 .get_port = inet_csk_get_port,
2161 .enter_memory_pressure = tcp_enter_memory_pressure,
2162 .sockets_allocated = &tcp_sockets_allocated,
2163 .memory_allocated = &tcp_memory_allocated,
2164 .memory_pressure = &tcp_memory_pressure,
2165 .orphan_count = &tcp_orphan_count,
2166 .sysctl_mem = sysctl_tcp_mem,
2167 .sysctl_wmem = sysctl_tcp_wmem,
2168 .sysctl_rmem = sysctl_tcp_rmem,
2169 .max_header = MAX_TCP_HEADER,
2170 .obj_size = sizeof(struct tcp6_sock),
2171 .twsk_prot = &tcp6_timewait_sock_ops,
2172 .rsk_prot = &tcp6_request_sock_ops,
2173 .hashinfo = &tcp_hashinfo,
2174 #ifdef CONFIG_COMPAT
2175 .compat_setsockopt = compat_tcp_setsockopt,
2176 .compat_getsockopt = compat_tcp_getsockopt,
2177 #endif
2178 REF_PROTO_INUSE(tcpv6)
2179 };
2180
2181 static struct inet6_protocol tcpv6_protocol = {
2182 .handler = tcp_v6_rcv,
2183 .err_handler = tcp_v6_err,
2184 .gso_send_check = tcp_v6_gso_send_check,
2185 .gso_segment = tcp_tso_segment,
2186 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2187 };
2188
2189 static struct inet_protosw tcpv6_protosw = {
2190 .type = SOCK_STREAM,
2191 .protocol = IPPROTO_TCP,
2192 .prot = &tcpv6_prot,
2193 .ops = &inet6_stream_ops,
2194 .capability = -1,
2195 .no_check = 0,
2196 .flags = INET_PROTOSW_PERMANENT |
2197 INET_PROTOSW_ICSK,
2198 };
2199
2200 static int tcpv6_net_init(struct net *net)
2201 {
2202 int err;
2203 struct socket *sock;
2204 struct sock *sk;
2205
2206 err = inet_csk_ctl_sock_create(&sock, PF_INET6, SOCK_RAW, IPPROTO_TCP);
2207 if (err)
2208 return err;
2209
2210 net->ipv6.tcp_sk = sk = sock->sk;
2211 sk_change_net(sk, net);
2212 return err;
2213 }
2214
2215 static void tcpv6_net_exit(struct net *net)
2216 {
2217 sk_release_kernel(net->ipv6.tcp_sk);
2218 }
2219
2220 static struct pernet_operations tcpv6_net_ops = {
2221 .init = tcpv6_net_init,
2222 .exit = tcpv6_net_exit,
2223 };
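
/* The pernet_operations above are the standard recipe for per-namespace
 * state: .init runs for each namespace as it is created (including
 * init_net at boot), .exit on teardown. Any code holding a struct net
 * can then reach that namespace's control socket; a trivial sketch
 * (helper name is hypothetical):
 *
 *	static inline struct sock *tcp6_ctl_sock(struct net *net)
 *	{
 *		return net->ipv6.tcp_sk;	// set in tcpv6_net_init()
 *	}
 */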
2224
2225 int __init tcpv6_init(void)
2226 {
2227 int ret;
2228
2229 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2230 if (ret)
2231 goto out;
2232
2233 /* register inet6 protocol */
2234 ret = inet6_register_protosw(&tcpv6_protosw);
2235 if (ret)
2236 goto out_tcpv6_protocol;
2237
2238 ret = register_pernet_subsys(&tcpv6_net_ops);
2239 if (ret)
2240 goto out_tcpv6_protosw;
2241 out:
2242 return ret;
2243
2244 out_tcpv6_protocol:
2245 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2246 out_tcpv6_protosw:
2247 inet6_unregister_protosw(&tcpv6_protosw);
2248 goto out;
2249 }
2250
2251 void tcpv6_exit(void)
2252 {
2253 unregister_pernet_subsys(&tcpv6_net_ops);
2254 inet6_unregister_protosw(&tcpv6_protosw);
2255 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2256 }