net/dccp/ipv6.c
1/*
2 * DCCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Based on net/dccp6/ipv6.c
6 *
7 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/random.h>
18#include <linux/xfrm.h>
19
20#include <net/addrconf.h>
21#include <net/inet_common.h>
22#include <net/inet_hashtables.h>
23#include <net/inet6_connection_sock.h>
24#include <net/inet6_hashtables.h>
25#include <net/ip6_route.h>
26#include <net/ipv6.h>
27#include <net/protocol.h>
28#include <net/transp_v6.h>
29#include <net/xfrm.h>
30
31#include "dccp.h"
32#include "ipv6.h"
33
34static void dccp_v6_ctl_send_reset(struct sk_buff *skb);
35static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
36 struct request_sock *req);
37static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb);
38
39static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
40
41static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
42static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
43
44static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
45{
46 return inet_csk_get_port(&dccp_hashinfo, sk, snum,
47 inet6_csk_bind_conflict);
48}
49
50static void dccp_v6_hash(struct sock *sk)
51{
52 if (sk->sk_state != DCCP_CLOSED) {
53 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
54 dccp_prot.hash(sk);
55 return;
56 }
57 local_bh_disable();
58 __inet6_hash(&dccp_hashinfo, sk);
59 local_bh_enable();
60 }
61}
62
63static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len,
64 struct in6_addr *saddr,
65 struct in6_addr *daddr,
66 unsigned long base)
67{
68 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base);
69}
70
71static __u32 dccp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
72{
73 const struct dccp_hdr *dh = dccp_hdr(skb);
74
75 if (skb->protocol == htons(ETH_P_IPV6))
76 return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
77 skb->nh.ipv6h->saddr.s6_addr32,
78 dh->dccph_dport,
79 dh->dccph_sport);
80 else
81 return secure_dccp_sequence_number(skb->nh.iph->daddr,
82 skb->nh.iph->saddr,
83 dh->dccph_dport,
84 dh->dccph_sport);
85}
86
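/*
 * Verify that the connection 4-tuple (saddr, daddr, sport, dport) is not
 * already in use, first in the TIME-WAIT chain and then in the established
 * chain of the ehash bucket.  If the tuple is unique the socket is added to
 * the established hash under the bucket lock, otherwise -EADDRNOTAVAIL is
 * returned.  This mirrors the uniqueness check done by the TCP connect path.
 */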
87static int __dccp_v6_check_established(struct sock *sk, const __u16 lport,
88 struct inet_timewait_sock **twp)
89{
90 struct inet_sock *inet = inet_sk(sk);
91 const struct ipv6_pinfo *np = inet6_sk(sk);
92 const struct in6_addr *daddr = &np->rcv_saddr;
93 const struct in6_addr *saddr = &np->daddr;
94 const int dif = sk->sk_bound_dev_if;
95 const u32 ports = INET_COMBINED_PORTS(inet->dport, lport);
96 const unsigned int hash = inet6_ehashfn(daddr, inet->num,
97 saddr, inet->dport);
98 struct inet_ehash_bucket *head = inet_ehash_bucket(&dccp_hashinfo, hash);
99 struct sock *sk2;
100 const struct hlist_node *node;
101 struct inet_timewait_sock *tw;
102
103 prefetch(head->chain.first);
104 write_lock(&head->lock);
105
106 /* Check TIME-WAIT sockets first. */
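	/*
	 * The TIME-WAIT buckets live in the second half of the ehash table,
	 * at head + ehash_size, which is why the walk below is offset from
	 * the established chain.
	 */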
107 sk_for_each(sk2, node, &(head + dccp_hashinfo.ehash_size)->chain) {
108 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2);
109
110 tw = inet_twsk(sk2);
111
 112 		if (*((__u32 *)&(tw->tw_dport)) == ports &&
113 sk2->sk_family == PF_INET6 &&
114 ipv6_addr_equal(&tw6->tw_v6_daddr, saddr) &&
115 ipv6_addr_equal(&tw6->tw_v6_rcv_saddr, daddr) &&
116 sk2->sk_bound_dev_if == sk->sk_bound_dev_if)
117 goto not_unique;
118 }
119 tw = NULL;
120
121 /* And established part... */
122 sk_for_each(sk2, node, &head->chain) {
123 if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif))
124 goto not_unique;
125 }
126
127 BUG_TRAP(sk_unhashed(sk));
128 __sk_add_node(sk, &head->chain);
129 sk->sk_hash = hash;
130 sock_prot_inc_use(sk->sk_prot);
131 write_unlock(&head->lock);
132
133 if (twp) {
134 *twp = tw;
135 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
136 } else if (tw) {
137 /* Silly. Should hash-dance instead... */
138 inet_twsk_deschedule(tw, &dccp_death_row);
139 NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
140
141 inet_twsk_put(tw);
142 }
143 return 0;
144
145not_unique:
146 write_unlock(&head->lock);
147 return -EADDRNOTAVAIL;
148}
149
150static inline u32 dccp_v6_port_offset(const struct sock *sk)
151{
152 const struct inet_sock *inet = inet_sk(sk);
153 const struct ipv6_pinfo *np = inet6_sk(sk);
154
155 return secure_tcpv6_port_ephemeral(np->rcv_saddr.s6_addr32,
156 np->daddr.s6_addr32,
157 inet->dport);
158}
159
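/*
 * Bind the socket to a local port and insert it into the established hash.
 * When no port has been bound yet, walk the local port range starting at a
 * per-destination offset, skip ports whose bind bucket has tb->fastreuse >= 0
 * (i.e. taken by an explicit bind), and take the first port for which
 * __dccp_v6_check_established() confirms the 4-tuple to be unique.  The
 * algorithm follows tcp_v4_hash_connect().
 */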
160static int dccp_v6_hash_connect(struct sock *sk)
161{
162 const unsigned short snum = inet_sk(sk)->num;
163 struct inet_bind_hashbucket *head;
164 struct inet_bind_bucket *tb;
165 int ret;
166
167 if (snum == 0) {
168 int low = sysctl_local_port_range[0];
169 int high = sysctl_local_port_range[1];
170 int range = high - low;
171 int i;
172 int port;
173 static u32 hint;
174 u32 offset = hint + dccp_v6_port_offset(sk);
175 struct hlist_node *node;
176 struct inet_timewait_sock *tw = NULL;
177
178 local_bh_disable();
179 for (i = 1; i <= range; i++) {
180 port = low + (i + offset) % range;
181 head = &dccp_hashinfo.bhash[inet_bhashfn(port,
182 dccp_hashinfo.bhash_size)];
183 spin_lock(&head->lock);
184
185 /* Does not bother with rcv_saddr checks,
186 * because the established check is already
187 * unique enough.
188 */
189 inet_bind_bucket_for_each(tb, node, &head->chain) {
190 if (tb->port == port) {
191 BUG_TRAP(!hlist_empty(&tb->owners));
192 if (tb->fastreuse >= 0)
193 goto next_port;
194 if (!__dccp_v6_check_established(sk,
195 port,
196 &tw))
197 goto ok;
198 goto next_port;
199 }
200 }
201
202 tb = inet_bind_bucket_create(dccp_hashinfo.bind_bucket_cachep,
203 head, port);
204 if (!tb) {
205 spin_unlock(&head->lock);
206 break;
207 }
208 tb->fastreuse = -1;
209 goto ok;
210
211 next_port:
212 spin_unlock(&head->lock);
213 }
214 local_bh_enable();
215
216 return -EADDRNOTAVAIL;
217ok:
218 hint += i;
219
220 /* Head lock still held and bh's disabled */
221 inet_bind_hash(sk, tb, port);
222 if (sk_unhashed(sk)) {
223 inet_sk(sk)->sport = htons(port);
224 __inet6_hash(&dccp_hashinfo, sk);
225 }
226 spin_unlock(&head->lock);
227
228 if (tw) {
229 inet_twsk_deschedule(tw, &dccp_death_row);
230 inet_twsk_put(tw);
231 }
232
233 ret = 0;
234 goto out;
235 }
236
237 head = &dccp_hashinfo.bhash[inet_bhashfn(snum,
238 dccp_hashinfo.bhash_size)];
239 tb = inet_csk(sk)->icsk_bind_hash;
240 spin_lock_bh(&head->lock);
241
242 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
243 __inet6_hash(&dccp_hashinfo, sk);
244 spin_unlock_bh(&head->lock);
245 return 0;
246 } else {
247 spin_unlock(&head->lock);
248 /* No definite answer... Walk to established hash table */
249 ret = __dccp_v6_check_established(sk, snum, NULL);
250out:
251 local_bh_enable();
252 return ret;
253 }
254}
255
256static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
257 int addr_len)
258{
259 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
260 struct inet_sock *inet = inet_sk(sk);
261 struct ipv6_pinfo *np = inet6_sk(sk);
262 struct dccp_sock *dp = dccp_sk(sk);
263 struct in6_addr *saddr = NULL, *final_p = NULL, final;
264 struct flowi fl;
265 struct dst_entry *dst;
266 int addr_type;
267 int err;
268
269 dp->dccps_role = DCCP_ROLE_CLIENT;
270
271 if (addr_len < SIN6_LEN_RFC2133)
272 return -EINVAL;
273
274 if (usin->sin6_family != AF_INET6)
275 return -EAFNOSUPPORT;
276
277 memset(&fl, 0, sizeof(fl));
278
279 if (np->sndflow) {
280 fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
281 IP6_ECN_flow_init(fl.fl6_flowlabel);
282 if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
283 struct ip6_flowlabel *flowlabel;
284 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
285 if (flowlabel == NULL)
286 return -EINVAL;
287 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
288 fl6_sock_release(flowlabel);
289 }
290 }
291
292 /*
293 * connect() to INADDR_ANY means loopback (BSD'ism).
294 */
295
296 if (ipv6_addr_any(&usin->sin6_addr))
297 usin->sin6_addr.s6_addr[15] = 0x1;
298
299 addr_type = ipv6_addr_type(&usin->sin6_addr);
300
 301 	if (addr_type & IPV6_ADDR_MULTICAST)
302 return -ENETUNREACH;
303
304 if (addr_type & IPV6_ADDR_LINKLOCAL) {
305 if (addr_len >= sizeof(struct sockaddr_in6) &&
306 usin->sin6_scope_id) {
307 /* If interface is set while binding, indices
308 * must coincide.
309 */
310 if (sk->sk_bound_dev_if &&
311 sk->sk_bound_dev_if != usin->sin6_scope_id)
312 return -EINVAL;
313
314 sk->sk_bound_dev_if = usin->sin6_scope_id;
315 }
316
317 /* Connect to link-local address requires an interface */
318 if (!sk->sk_bound_dev_if)
319 return -EINVAL;
320 }
321
322 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
323 np->flow_label = fl.fl6_flowlabel;
324
325 /*
326 * DCCP over IPv4
327 */
328
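	/*
	 * An IPv4-mapped destination (::ffff:a.b.c.d) is handed over to the
	 * IPv4 connect code: icsk_af_ops is switched to dccp_ipv6_mapped, the
	 * backlog handler to dccp_v4_do_rcv, and on success the IPv6 source
	 * addresses are rebuilt in mapped form from their IPv4 counterparts.
	 */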
329 if (addr_type == IPV6_ADDR_MAPPED) {
330 u32 exthdrlen = dp->dccps_ext_header_len;
331 struct sockaddr_in sin;
332
333 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
334
335 if (__ipv6_only_sock(sk))
336 return -ENETUNREACH;
337
338 sin.sin_family = AF_INET;
339 sin.sin_port = usin->sin6_port;
340 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
341
342 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_mapped;
343 sk->sk_backlog_rcv = dccp_v4_do_rcv;
344
345 err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
346
347 if (err) {
348 dp->dccps_ext_header_len = exthdrlen;
349 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
350 sk->sk_backlog_rcv = dccp_v6_do_rcv;
351 goto failure;
352 } else {
353 ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
354 inet->saddr);
355 ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
356 inet->rcv_saddr);
357 }
358
359 return err;
360 }
361
362 if (!ipv6_addr_any(&np->rcv_saddr))
363 saddr = &np->rcv_saddr;
364
365 fl.proto = IPPROTO_DCCP;
366 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
367 ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
368 fl.oif = sk->sk_bound_dev_if;
369 fl.fl_ip_dport = usin->sin6_port;
370 fl.fl_ip_sport = inet->sport;
371
372 if (np->opt && np->opt->srcrt) {
373 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
374 ipv6_addr_copy(&final, &fl.fl6_dst);
375 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
376 final_p = &final;
377 }
378
379 err = ip6_dst_lookup(sk, &dst, &fl);
380 if (err)
381 goto failure;
382 if (final_p)
383 ipv6_addr_copy(&fl.fl6_dst, final_p);
384
385 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
386 goto failure;
387
388 if (saddr == NULL) {
389 saddr = &fl.fl6_src;
390 ipv6_addr_copy(&np->rcv_saddr, saddr);
391 }
392
393 /* set the source address */
394 ipv6_addr_copy(&np->saddr, saddr);
395 inet->rcv_saddr = LOOPBACK4_IPV6;
396
397 ip6_dst_store(sk, dst, NULL);
398
399 dp->dccps_ext_header_len = 0;
400 if (np->opt)
401 dp->dccps_ext_header_len = np->opt->opt_flen + np->opt->opt_nflen;
402
403 inet->dport = usin->sin6_port;
404
405 dccp_set_state(sk, DCCP_REQUESTING);
406 err = dccp_v6_hash_connect(sk);
407 if (err)
408 goto late_failure;
409 /* FIXME */
410#if 0
411 dp->dccps_gar = secure_dccp_v6_sequence_number(np->saddr.s6_addr32,
412 np->daddr.s6_addr32,
413 inet->sport,
414 inet->dport);
415#endif
416 err = dccp_connect(sk);
417 if (err)
418 goto late_failure;
419
420 return 0;
421
422late_failure:
423 dccp_set_state(sk, DCCP_CLOSED);
424 __sk_dst_reset(sk);
425failure:
426 inet->dport = 0;
427 sk->sk_route_caps = 0;
428 return err;
429}
430
431static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
432 int type, int code, int offset, __u32 info)
433{
434 struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
435 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
436 struct ipv6_pinfo *np;
437 struct sock *sk;
438 int err;
439 __u64 seq;
440
441 sk = inet6_lookup(&dccp_hashinfo, &hdr->daddr, dh->dccph_dport,
442 &hdr->saddr, dh->dccph_sport, skb->dev->ifindex);
443
444 if (sk == NULL) {
445 ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
446 return;
447 }
448
449 if (sk->sk_state == DCCP_TIME_WAIT) {
450 inet_twsk_put((struct inet_timewait_sock *)sk);
451 return;
452 }
453
454 bh_lock_sock(sk);
455 if (sock_owned_by_user(sk))
456 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
457
458 if (sk->sk_state == DCCP_CLOSED)
459 goto out;
460
461 np = inet6_sk(sk);
462
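	/*
	 * Path MTU discovery: on ICMPV6_PKT_TOOBIG re-validate (or rebuild)
	 * the cached route and, if the new path MTU is smaller than the
	 * cached dccps_pmtu_cookie, shrink the packet size via dccp_sync_mss().
	 */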
463 if (type == ICMPV6_PKT_TOOBIG) {
464 struct dccp_sock *dp = dccp_sk(sk);
465 struct dst_entry *dst = NULL;
466
467 if (sock_owned_by_user(sk))
468 goto out;
469 if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
470 goto out;
471
472 /* icmp should have updated the destination cache entry */
473 dst = __sk_dst_check(sk, np->dst_cookie);
474
475 if (dst == NULL) {
476 struct inet_sock *inet = inet_sk(sk);
477 struct flowi fl;
478
479 /* BUGGG_FUTURE: Again, it is not clear how
480 to handle rthdr case. Ignore this complexity
481 for now.
482 */
483 memset(&fl, 0, sizeof(fl));
484 fl.proto = IPPROTO_DCCP;
485 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
486 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
487 fl.oif = sk->sk_bound_dev_if;
488 fl.fl_ip_dport = inet->dport;
489 fl.fl_ip_sport = inet->sport;
490
491 if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
492 sk->sk_err_soft = -err;
493 goto out;
494 }
495
496 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
497 sk->sk_err_soft = -err;
498 goto out;
499 }
500
501 } else
502 dst_hold(dst);
503
504 if (dp->dccps_pmtu_cookie > dst_mtu(dst)) {
505 dccp_sync_mss(sk, dst_mtu(dst));
506 } /* else let the usual retransmit timer handle it */
507 dst_release(dst);
508 goto out;
509 }
510
511 icmpv6_err_convert(type, code, &err);
512
513 seq = DCCP_SKB_CB(skb)->dccpd_seq;
 514 	/* Might be for a request_sock */
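	/*
	 * On a listening socket the ICMP error may refer to a pending
	 * connection request rather than to the listener itself: look the
	 * request up by the addresses and ports quoted in the error and drop
	 * it when the quoted sequence number matches the ISS we sent.
	 */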
515 switch (sk->sk_state) {
516 struct request_sock *req, **prev;
517 case DCCP_LISTEN:
518 if (sock_owned_by_user(sk))
519 goto out;
520
521 req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
522 &hdr->daddr, &hdr->saddr,
523 inet6_iif(skb));
524 if (!req)
525 goto out;
526
527 /* ICMPs are not backlogged, hence we cannot get
528 * an established socket here.
529 */
530 BUG_TRAP(req->sk == NULL);
531
532 if (seq != dccp_rsk(req)->dreq_iss) {
533 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
534 goto out;
535 }
536
537 inet_csk_reqsk_queue_drop(sk, req, prev);
538 goto out;
539
540 case DCCP_REQUESTING:
541 case DCCP_RESPOND: /* Cannot happen.
 542 			    It can, if SYNs are crossed. --ANK */
543 if (!sock_owned_by_user(sk)) {
544 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
545 sk->sk_err = err;
546 /*
547 * Wake people up to see the error
548 * (see connect in sock.c)
549 */
550 sk->sk_error_report(sk);
551
552 dccp_done(sk);
553 } else
554 sk->sk_err_soft = err;
555 goto out;
556 }
557
558 if (!sock_owned_by_user(sk) && np->recverr) {
559 sk->sk_err = err;
560 sk->sk_error_report(sk);
561 } else
562 sk->sk_err_soft = err;
563
564out:
565 bh_unlock_sock(sk);
566 sock_put(sk);
567}
568
569
570static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
571 struct dst_entry *dst)
572{
573 struct inet6_request_sock *ireq6 = inet6_rsk(req);
574 struct ipv6_pinfo *np = inet6_sk(sk);
575 struct sk_buff *skb;
576 struct ipv6_txoptions *opt = NULL;
577 struct in6_addr *final_p = NULL, final;
578 struct flowi fl;
579 int err = -1;
580
581 memset(&fl, 0, sizeof(fl));
582 fl.proto = IPPROTO_DCCP;
583 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
584 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
585 fl.fl6_flowlabel = 0;
586 fl.oif = ireq6->iif;
587 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
588 fl.fl_ip_sport = inet_sk(sk)->sport;
589
590 if (dst == NULL) {
591 opt = np->opt;
592 if (opt == NULL &&
593 np->rxopt.bits.osrcrt == 2 &&
594 ireq6->pktopts) {
595 struct sk_buff *pktopts = ireq6->pktopts;
596 struct inet6_skb_parm *rxopt = IP6CB(pktopts);
597 if (rxopt->srcrt)
598 opt = ipv6_invert_rthdr(sk,
599 (struct ipv6_rt_hdr *)(pktopts->nh.raw +
600 rxopt->srcrt));
601 }
602
603 if (opt && opt->srcrt) {
604 struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
605 ipv6_addr_copy(&final, &fl.fl6_dst);
606 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
607 final_p = &final;
608 }
609
610 err = ip6_dst_lookup(sk, &dst, &fl);
611 if (err)
612 goto done;
613 if (final_p)
614 ipv6_addr_copy(&fl.fl6_dst, final_p);
615 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
616 goto done;
617 }
618
619 skb = dccp_make_response(sk, dst, req);
620 if (skb != NULL) {
621 struct dccp_hdr *dh = dccp_hdr(skb);
622 dh->dccph_checksum = dccp_v6_check(dh, skb->len,
623 &ireq6->loc_addr,
624 &ireq6->rmt_addr,
625 csum_partial((char *)dh,
626 skb->len,
627 skb->csum));
628 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
629 err = ip6_xmit(sk, skb, &fl, opt, 0);
630 if (err == NET_XMIT_CN)
631 err = 0;
632 }
633
634done:
635 if (opt && opt != np->opt)
636 sock_kfree_s(sk, opt, opt->tot_len);
637 return err;
638}
639
640static void dccp_v6_reqsk_destructor(struct request_sock *req)
641{
642 if (inet6_rsk(req)->pktopts != NULL)
643 kfree_skb(inet6_rsk(req)->pktopts);
644}
645
646static struct request_sock_ops dccp6_request_sock_ops = {
647 .family = AF_INET6,
648 .obj_size = sizeof(struct dccp6_request_sock),
649 .rtx_syn_ack = dccp_v6_send_response,
650 .send_ack = dccp_v6_reqsk_send_ack,
651 .destructor = dccp_v6_reqsk_destructor,
652 .send_reset = dccp_v6_ctl_send_reset,
653};
654
655static struct timewait_sock_ops dccp6_timewait_sock_ops = {
656 .twsk_obj_size = sizeof(struct dccp6_timewait_sock),
657};
658
659static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
660{
661 struct ipv6_pinfo *np = inet6_sk(sk);
662 struct dccp_hdr *dh = dccp_hdr(skb);
663
664 dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr,
665 len, IPPROTO_DCCP,
666 csum_partial((char *)dh,
667 dh->dccph_doff << 2,
668 skb->csum));
669}
670
671static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
672{
673 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
674 const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
675 sizeof(struct dccp_hdr_ext) +
676 sizeof(struct dccp_hdr_reset);
677 struct sk_buff *skb;
678 struct flowi fl;
679 u64 seqno;
680
681 if (rxdh->dccph_type == DCCP_PKT_RESET)
682 return;
683
684 if (!ipv6_unicast_destination(rxskb))
685 return;
686
687 /*
688 * We need to grab some memory, and put together an RST,
689 * and then put it into the queue to be sent.
690 */
691
692 skb = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) +
693 dccp_hdr_reset_len, GFP_ATOMIC);
694 if (skb == NULL)
695 return;
696
697 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr) +
698 dccp_hdr_reset_len);
699
700 skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
701 dh = dccp_hdr(skb);
702 memset(dh, 0, dccp_hdr_reset_len);
703
704 /* Swap the send and the receive. */
705 dh->dccph_type = DCCP_PKT_RESET;
706 dh->dccph_sport = rxdh->dccph_dport;
707 dh->dccph_dport = rxdh->dccph_sport;
708 dh->dccph_doff = dccp_hdr_reset_len / 4;
709 dh->dccph_x = 1;
710 dccp_hdr_reset(skb)->dccph_reset_code =
711 DCCP_SKB_CB(rxskb)->dccpd_reset_code;
712
713 /* See "8.3.1. Abnormal Termination" in draft-ietf-dccp-spec-11 */
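	/*
	 * If the packet being reset carried an Acknowledgement Number, the
	 * Reset's Sequence Number is set to that value plus one; otherwise
	 * it is left at zero.
	 */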
714 seqno = 0;
715 if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
716 dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);
717
718 dccp_hdr_set_seq(dh, seqno);
719 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
720 DCCP_SKB_CB(rxskb)->dccpd_seq);
721
722 memset(&fl, 0, sizeof(fl));
723 ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
724 ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
725 dh->dccph_checksum = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
726 sizeof(*dh), IPPROTO_DCCP,
727 skb->csum);
728 fl.proto = IPPROTO_DCCP;
729 fl.oif = inet6_iif(rxskb);
730 fl.fl_ip_dport = dh->dccph_dport;
731 fl.fl_ip_sport = dh->dccph_sport;
732
733 /* sk = NULL, but it is safe for now. RST socket required. */
734 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
735 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
736 ip6_xmit(NULL, skb, &fl, NULL, 0);
737 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
738 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
739 return;
740 }
741 }
742
743 kfree_skb(skb);
744}
745
746static void dccp_v6_ctl_send_ack(struct sk_buff *rxskb)
747{
748 struct flowi fl;
749 struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
750 const int dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
751 sizeof(struct dccp_hdr_ext) +
752 sizeof(struct dccp_hdr_ack_bits);
753 struct sk_buff *skb;
754
755 skb = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) +
756 dccp_hdr_ack_len, GFP_ATOMIC);
757 if (skb == NULL)
758 return;
759
760 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr) +
761 dccp_hdr_ack_len);
762
763 skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
764 dh = dccp_hdr(skb);
765 memset(dh, 0, dccp_hdr_ack_len);
766
767 /* Build DCCP header and checksum it. */
768 dh->dccph_type = DCCP_PKT_ACK;
769 dh->dccph_sport = rxdh->dccph_dport;
770 dh->dccph_dport = rxdh->dccph_sport;
771 dh->dccph_doff = dccp_hdr_ack_len / 4;
772 dh->dccph_x = 1;
773
774 dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
775 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
776 DCCP_SKB_CB(rxskb)->dccpd_seq);
777
778 memset(&fl, 0, sizeof(fl));
779 ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
780 ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
781
782 /* FIXME: calculate checksum, IPv4 also should... */
783
784 fl.proto = IPPROTO_DCCP;
785 fl.oif = inet6_iif(rxskb);
786 fl.fl_ip_dport = dh->dccph_dport;
787 fl.fl_ip_sport = dh->dccph_sport;
788
789 if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
790 if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
791 ip6_xmit(NULL, skb, &fl, NULL, 0);
792 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
793 return;
794 }
795 }
796
797 kfree_skb(skb);
798}
799
800static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
801 struct request_sock *req)
802{
803 dccp_v6_ctl_send_ack(skb);
804}
805
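/*
 * Map a packet received on a listening socket to the socket that should
 * process it: a pending request_sock is resolved through dccp_check_req(),
 * an already established child is returned locked, a child in TIME-WAIT
 * makes the packet be dropped (NULL), and otherwise the listener itself is
 * returned.
 */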
 806static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
807{
808 const struct dccp_hdr *dh = dccp_hdr(skb);
809 const struct ipv6hdr *iph = skb->nh.ipv6h;
810 struct sock *nsk;
811 struct request_sock **prev;
812 /* Find possible connection requests. */
813 struct request_sock *req = inet6_csk_search_req(sk, &prev,
814 dh->dccph_sport,
815 &iph->saddr,
816 &iph->daddr,
817 inet6_iif(skb));
818 if (req != NULL)
819 return dccp_check_req(sk, skb, req, prev);
820
821 nsk = __inet6_lookup_established(&dccp_hashinfo,
822 &iph->saddr, dh->dccph_sport,
823 &iph->daddr, ntohs(dh->dccph_dport),
824 inet6_iif(skb));
825
826 if (nsk != NULL) {
827 if (nsk->sk_state != DCCP_TIME_WAIT) {
828 bh_lock_sock(nsk);
829 return nsk;
830 }
831 inet_twsk_put((struct inet_timewait_sock *)nsk);
832 return NULL;
833 }
834
835 return sk;
836}
837
838static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
839{
840 struct inet_request_sock *ireq;
841 struct dccp_sock dp;
842 struct request_sock *req;
843 struct dccp_request_sock *dreq;
844 struct inet6_request_sock *ireq6;
845 struct ipv6_pinfo *np = inet6_sk(sk);
846 const __u32 service = dccp_hdr_request(skb)->dccph_req_service;
847 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
848 __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;
849
850 if (skb->protocol == htons(ETH_P_IP))
851 return dccp_v4_conn_request(sk, skb);
852
853 if (!ipv6_unicast_destination(skb))
854 goto drop;
855
856 if (dccp_bad_service_code(sk, service)) {
857 reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
858 goto drop;
859 }
860 /*
861 * There are no SYN attacks on IPv6, yet...
862 */
863 if (inet_csk_reqsk_queue_is_full(sk))
864 goto drop;
865
866 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
867 goto drop;
868
869 req = inet6_reqsk_alloc(sk->sk_prot->rsk_prot);
870 if (req == NULL)
871 goto drop;
872
873 /* FIXME: process options */
874
875 dccp_openreq_init(req, &dp, skb);
876
877 ireq6 = inet6_rsk(req);
878 ireq = inet_rsk(req);
879 ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
880 ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
881 req->rcv_wnd = 100; /* Fake, option parsing will get the
882 right value */
883 ireq6->pktopts = NULL;
884
885 if (ipv6_opt_accepted(sk, skb) ||
886 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
887 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
888 atomic_inc(&skb->users);
889 ireq6->pktopts = skb;
890 }
891 ireq6->iif = sk->sk_bound_dev_if;
892
893 /* So that link locals have meaning */
894 if (!sk->sk_bound_dev_if &&
895 ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
896 ireq6->iif = inet6_iif(skb);
897
898 /*
899 * Step 3: Process LISTEN state
900 *
901 * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
902 *
903 * In fact we defer setting S.GSR, S.SWL, S.SWH to
904 * dccp_create_openreq_child.
905 */
906 dreq = dccp_rsk(req);
907 dreq->dreq_isr = dcb->dccpd_seq;
908 dreq->dreq_iss = dccp_v6_init_sequence(sk, skb);
909 dreq->dreq_service = service;
910
911 if (dccp_v6_send_response(sk, req, NULL))
912 goto drop_and_free;
913
914 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
915 return 0;
916
917drop_and_free:
918 reqsk_free(req);
919drop:
920 DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
921 dcb->dccpd_reset_code = reset_code;
922 return -1;
923}
924
925static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
926 struct sk_buff *skb,
927 struct request_sock *req,
928 struct dst_entry *dst)
929{
930 struct inet6_request_sock *ireq6 = inet6_rsk(req);
931 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
932 struct inet_sock *newinet;
933 struct dccp_sock *newdp;
934 struct dccp6_sock *newdp6;
935 struct sock *newsk;
936 struct ipv6_txoptions *opt;
937
938 if (skb->protocol == htons(ETH_P_IP)) {
939 /*
940 * v6 mapped
941 */
942
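		/*
		 * The child socket is created by the IPv4 code and converted
		 * afterwards: its pinet6 pointer is wired up and the mapped
		 * forms of the IPv4 addresses are stored in the IPv6 fields,
		 * with icsk_af_ops switched to dccp_ipv6_mapped.
		 */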
943 newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
944 if (newsk == NULL)
945 return NULL;
946
947 newdp6 = (struct dccp6_sock *)newsk;
948 newdp = dccp_sk(newsk);
949 newinet = inet_sk(newsk);
950 newinet->pinet6 = &newdp6->inet6;
951 newnp = inet6_sk(newsk);
952
953 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
954
955 ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
956 newinet->daddr);
957
958 ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
959 newinet->saddr);
960
961 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
962
963 inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
964 newsk->sk_backlog_rcv = dccp_v4_do_rcv;
965 newnp->pktoptions = NULL;
966 newnp->opt = NULL;
967 newnp->mcast_oif = inet6_iif(skb);
968 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
969
970 /*
971 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
972 * here, dccp_create_openreq_child now does this for us, see the comment in
973 * that function for the gory details. -acme
974 */
975
 976 		/* This is a tricky place. Until this moment the IPv4 code
 977 		   worked with the IPv6 icsk.icsk_af_ops.
978 Sync it now.
979 */
980 dccp_sync_mss(newsk, newdp->dccps_pmtu_cookie);
981
982 return newsk;
983 }
984
985 opt = np->opt;
986
987 if (sk_acceptq_is_full(sk))
988 goto out_overflow;
989
990 if (np->rxopt.bits.osrcrt == 2 &&
991 opt == NULL && ireq6->pktopts) {
992 struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts);
993 if (rxopt->srcrt)
994 opt = ipv6_invert_rthdr(sk,
995 (struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw +
996 rxopt->srcrt));
997 }
998
999 if (dst == NULL) {
1000 struct in6_addr *final_p = NULL, final;
1001 struct flowi fl;
1002
1003 memset(&fl, 0, sizeof(fl));
1004 fl.proto = IPPROTO_DCCP;
1005 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
1006 if (opt && opt->srcrt) {
1007 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1008 ipv6_addr_copy(&final, &fl.fl6_dst);
1009 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1010 final_p = &final;
1011 }
1012 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
1013 fl.oif = sk->sk_bound_dev_if;
1014 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
1015 fl.fl_ip_sport = inet_sk(sk)->sport;
1016
1017 if (ip6_dst_lookup(sk, &dst, &fl))
1018 goto out;
1019
1020 if (final_p)
1021 ipv6_addr_copy(&fl.fl6_dst, final_p);
1022
1023 if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
1024 goto out;
1025 }
1026
1027 newsk = dccp_create_openreq_child(sk, req, skb);
1028 if (newsk == NULL)
1029 goto out;
1030
1031 /*
1032 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1033 * count here, dccp_create_openreq_child now does this for us, see the
1034 * comment in that function for the gory details. -acme
1035 */
1036
1037 ip6_dst_store(newsk, dst, NULL);
1038 newsk->sk_route_caps = dst->dev->features &
1039 ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
1040
1041 newdp6 = (struct dccp6_sock *)newsk;
1042 newinet = inet_sk(newsk);
1043 newinet->pinet6 = &newdp6->inet6;
1044 newdp = dccp_sk(newsk);
1045 newnp = inet6_sk(newsk);
1046
1047 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1048
1049 ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
1050 ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
1051 ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
1052 newsk->sk_bound_dev_if = ireq6->iif;
1053
1054 /* Now IPv6 options...
1055
1056 First: no IPv4 options.
1057 */
1058 newinet->opt = NULL;
1059
1060 /* Clone RX bits */
1061 newnp->rxopt.all = np->rxopt.all;
1062
1063 /* Clone pktoptions received with SYN */
1064 newnp->pktoptions = NULL;
1065 if (ireq6->pktopts != NULL) {
1066 newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
1067 kfree_skb(ireq6->pktopts);
1068 ireq6->pktopts = NULL;
1069 if (newnp->pktoptions)
1070 skb_set_owner_r(newnp->pktoptions, newsk);
1071 }
1072 newnp->opt = NULL;
1073 newnp->mcast_oif = inet6_iif(skb);
1074 newnp->mcast_hops = skb->nh.ipv6h->hop_limit;
1075
1076 /* Clone native IPv6 options from listening socket (if any)
1077
1078 Yes, keeping reference count would be much more clever,
 1079	   but we do one more thing here: reattach optmem
1080 to newsk.
1081 */
1082 if (opt) {
1083 newnp->opt = ipv6_dup_options(newsk, opt);
1084 if (opt != np->opt)
1085 sock_kfree_s(sk, opt, opt->tot_len);
1086 }
1087
1088 newdp->dccps_ext_header_len = 0;
1089 if (newnp->opt)
1090 newdp->dccps_ext_header_len = newnp->opt->opt_nflen +
1091 newnp->opt->opt_flen;
1092
1093 dccp_sync_mss(newsk, dst_mtu(dst));
1094
1095 newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;
1096
1097 __inet6_hash(&dccp_hashinfo, newsk);
1098 inet_inherit_port(&dccp_hashinfo, sk, newsk);
1099
1100 return newsk;
1101
1102out_overflow:
1103 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
1104out:
1105 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
1106 if (opt && opt != np->opt)
1107 sock_kfree_s(sk, opt, opt->tot_len);
1108 dst_release(dst);
1109 return NULL;
1110}
1111
 1112/* The socket must have its spinlock held when we get
1113 * here.
1114 *
1115 * We have a potential double-lock case here, so even when
1116 * doing backlog processing we use the BH locking scheme.
1117 * This is because we cannot sleep with the original spinlock
1118 * held.
1119 */
1120static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1121{
1122 struct ipv6_pinfo *np = inet6_sk(sk);
1123 struct sk_buff *opt_skb = NULL;
1124
1125 /* Imagine: socket is IPv6. IPv4 packet arrives,
 1126	   goes to the IPv4 receive handler and is backlogged.
 1127	   From the backlog it always ends up here. Kerboom...
1128 Fortunately, dccp_rcv_established and rcv_established
 1129	   handle them correctly, but that is not the case with
1130 dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
1131 */
1132
1133 if (skb->protocol == htons(ETH_P_IP))
1134 return dccp_v4_do_rcv(sk, skb);
1135
1136 if (sk_filter(sk, skb, 0))
1137 goto discard;
1138
1139 /*
1140 * socket locking is here for SMP purposes as backlog rcv
1141 * is currently called with bh processing disabled.
1142 */
1143
1144 /* Do Stevens' IPV6_PKTOPTIONS.
1145
 1146	   Yes, this is the only place in our code where we
 1147	   can do it without affecting IPv4.
 1148	   The rest of the code is protocol independent,
 1149	   and I do not like the idea of uglifying IPv4.
 1150
 1151	   Actually, the whole idea behind IPV6_PKTOPTIONS
 1152	   does not look very well thought out. For now we latch
 1153	   the options received in the last packet, as enqueued
 1154	   by tcp. Feel free to propose a better solution.
1155 --ANK (980728)
1156 */
1157 if (np->rxopt.all)
1158 opt_skb = skb_clone(skb, GFP_ATOMIC);
1159
1160 if (sk->sk_state == DCCP_OPEN) { /* Fast path */
1161 if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
1162 goto reset;
1163 return 0;
1164 }
1165
1166 if (sk->sk_state == DCCP_LISTEN) {
1167 struct sock *nsk = dccp_v6_hnd_req(sk, skb);
1168 if (!nsk)
1169 goto discard;
1170
1171 /*
1172 * Queue it on the new socket if the new socket is active,
1173 * otherwise we just shortcircuit this and continue with
1174 * the new socket..
1175 */
 1176		if (nsk != sk) {
1177 if (dccp_child_process(sk, nsk, skb))
1178 goto reset;
1179 if (opt_skb)
1180 __kfree_skb(opt_skb);
1181 return 0;
1182 }
1183 }
1184
1185 if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
1186 goto reset;
1187 return 0;
1188
1189reset:
1190 dccp_v6_ctl_send_reset(skb);
1191discard:
1192 if (opt_skb)
1193 __kfree_skb(opt_skb);
1194 kfree_skb(skb);
1195 return 0;
1196}
1197
1198static int dccp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
1199{
1200 const struct dccp_hdr *dh;
1201 struct sk_buff *skb = *pskb;
1202 struct sock *sk;
1203 int rc;
1204
1205 /* Step 1: Check header basics: */
1206
1207 if (dccp_invalid_packet(skb))
1208 goto discard_it;
1209
1210 dh = dccp_hdr(skb);
1211
1212 DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb);
1213 DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
1214
1215 if (dccp_packet_without_ack(skb))
1216 DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
1217 else
1218 DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
1219
1220 /* Step 2:
1221 * Look up flow ID in table and get corresponding socket */
1222 sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
1223 dh->dccph_sport,
1224 &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
1225 inet6_iif(skb));
1226 /*
1227 * Step 2:
1228 * If no socket ...
1229 * Generate Reset(No Connection) unless P.type == Reset
1230 * Drop packet and return
1231 */
1232 if (sk == NULL)
1233 goto no_dccp_socket;
1234
1235 /*
1236 * Step 2:
1237 * ... or S.state == TIMEWAIT,
1238 * Generate Reset(No Connection) unless P.type == Reset
1239 * Drop packet and return
1240 */
1241
1242 if (sk->sk_state == DCCP_TIME_WAIT)
1243 goto do_time_wait;
1244
1245 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1246 goto discard_and_relse;
1247
1248 if (sk_filter(sk, skb, 0))
1249 goto discard_and_relse;
1250
1251 skb->dev = NULL;
1252
1253 bh_lock_sock(sk);
1254 rc = 0;
1255 if (!sock_owned_by_user(sk))
1256 rc = dccp_v6_do_rcv(sk, skb);
1257 else
1258 sk_add_backlog(sk, skb);
1259 bh_unlock_sock(sk);
1260
1261 sock_put(sk);
1262 return rc ? -1 : 0;
1263
1264no_dccp_socket:
1265 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1266 goto discard_it;
1267 /*
1268 * Step 2:
1269 * Generate Reset(No Connection) unless P.type == Reset
1270 * Drop packet and return
1271 */
1272 if (dh->dccph_type != DCCP_PKT_RESET) {
1273 DCCP_SKB_CB(skb)->dccpd_reset_code =
1274 DCCP_RESET_CODE_NO_CONNECTION;
1275 dccp_v6_ctl_send_reset(skb);
1276 }
1277discard_it:
1278
1279 /*
1280 * Discard frame
1281 */
1282
1283 kfree_skb(skb);
1284 return 0;
1285
1286discard_and_relse:
1287 sock_put(sk);
1288 goto discard_it;
1289
1290do_time_wait:
1291 inet_twsk_put((struct inet_timewait_sock *)sk);
1292 goto no_dccp_socket;
1293}
1294
1295static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
1296 .queue_xmit = inet6_csk_xmit,
1297 .send_check = dccp_v6_send_check,
1298 .rebuild_header = inet6_sk_rebuild_header,
1299 .conn_request = dccp_v6_conn_request,
1300 .syn_recv_sock = dccp_v6_request_recv_sock,
1301 .net_header_len = sizeof(struct ipv6hdr),
1302 .setsockopt = ipv6_setsockopt,
1303 .getsockopt = ipv6_getsockopt,
1304 .addr2sockaddr = inet6_csk_addr2sockaddr,
1305 .sockaddr_len = sizeof(struct sockaddr_in6)
1306};
1307
1308/*
1309 * DCCP over IPv4 via INET6 API
1310 */
1311static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
1312 .queue_xmit = ip_queue_xmit,
1313 .send_check = dccp_v4_send_check,
1314 .rebuild_header = inet_sk_rebuild_header,
1315 .conn_request = dccp_v6_conn_request,
1316 .syn_recv_sock = dccp_v6_request_recv_sock,
1317 .net_header_len = sizeof(struct iphdr),
1318 .setsockopt = ipv6_setsockopt,
1319 .getsockopt = ipv6_getsockopt,
1320 .addr2sockaddr = inet6_csk_addr2sockaddr,
1321 .sockaddr_len = sizeof(struct sockaddr_in6)
1322};
1323
 1324/* NOTE: A lot of things are set to zero explicitly by the call to
 1325 *       sk_alloc(), so they need not be done here.
1326 */
1327static int dccp_v6_init_sock(struct sock *sk)
1328{
1329 int err = dccp_v4_init_sock(sk);
1330
1331 if (err == 0)
1332 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1333
1334 return err;
1335}
1336
1337static int dccp_v6_destroy_sock(struct sock *sk)
1338{
1339 dccp_v4_destroy_sock(sk);
1340 return inet6_destroy_sock(sk);
1341}
1342
1343static struct proto dccp_v6_prot = {
1344 .name = "DCCPv6",
1345 .owner = THIS_MODULE,
1346 .close = dccp_close,
1347 .connect = dccp_v6_connect,
1348 .disconnect = dccp_disconnect,
1349 .ioctl = dccp_ioctl,
1350 .init = dccp_v6_init_sock,
1351 .setsockopt = dccp_setsockopt,
1352 .getsockopt = dccp_getsockopt,
1353 .sendmsg = dccp_sendmsg,
1354 .recvmsg = dccp_recvmsg,
1355 .backlog_rcv = dccp_v6_do_rcv,
1356 .hash = dccp_v6_hash,
1357 .unhash = dccp_unhash,
1358 .accept = inet_csk_accept,
1359 .get_port = dccp_v6_get_port,
1360 .shutdown = dccp_shutdown,
1361 .destroy = dccp_v6_destroy_sock,
1362 .orphan_count = &dccp_orphan_count,
1363 .max_header = MAX_DCCP_HEADER,
1364 .obj_size = sizeof(struct dccp6_sock),
1365 .rsk_prot = &dccp6_request_sock_ops,
 1366	.twsk_prot		= &dccp6_timewait_sock_ops,
1367};
1368
1369static struct inet6_protocol dccp_v6_protocol = {
1370 .handler = dccp_v6_rcv,
1371 .err_handler = dccp_v6_err,
1372 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1373};
1374
1375static struct proto_ops inet6_dccp_ops = {
1376 .family = PF_INET6,
1377 .owner = THIS_MODULE,
1378 .release = inet6_release,
1379 .bind = inet6_bind,
1380 .connect = inet_stream_connect,
1381 .socketpair = sock_no_socketpair,
1382 .accept = inet_accept,
1383 .getname = inet6_getname,
1384 .poll = dccp_poll,
1385 .ioctl = inet6_ioctl,
1386 .listen = inet_dccp_listen,
1387 .shutdown = inet_shutdown,
1388 .setsockopt = sock_common_setsockopt,
1389 .getsockopt = sock_common_getsockopt,
1390 .sendmsg = inet_sendmsg,
1391 .recvmsg = sock_common_recvmsg,
1392 .mmap = sock_no_mmap,
1393 .sendpage = sock_no_sendpage,
1394};
1395
1396static struct inet_protosw dccp_v6_protosw = {
1397 .type = SOCK_DCCP,
1398 .protocol = IPPROTO_DCCP,
1399 .prot = &dccp_v6_prot,
1400 .ops = &inet6_dccp_ops,
1401 .capability = -1,
1402};
1403
1404static int __init dccp_v6_init(void)
1405{
1406 int err = proto_register(&dccp_v6_prot, 1);
1407
1408 if (err != 0)
1409 goto out;
1410
1411 err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1412 if (err != 0)
1413 goto out_unregister_proto;
1414
1415 inet6_register_protosw(&dccp_v6_protosw);
1416out:
1417 return err;
1418out_unregister_proto:
1419 proto_unregister(&dccp_v6_prot);
1420 goto out;
1421}
1422
1423static void __exit dccp_v6_exit(void)
1424{
1425 inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1426 inet6_unregister_protosw(&dccp_v6_protosw);
1427 proto_unregister(&dccp_v6_prot);
1428}
1429
1430module_init(dccp_v6_init);
1431module_exit(dccp_v6_exit);
1432
1433/*
 1434 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
 1435 * values directly. Also cover the case where the protocol is not specified,
1436 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1437 */
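/*
 * For instance, a userspace call such as
 *	socket(PF_INET6, SOCK_DCCP, IPPROTO_DCCP);
 * should resolve to the alias "net-pf-10-proto-33-type-6" (PF_INET6 == 10)
 * and allow this module to be loaded on demand.
 */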
1438MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6");
1439MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6");
1440MODULE_LICENSE("GPL");
1441MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1442MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");