[DCCP] ipv6: cleanups
[deliverable/linux.git] / net / dccp / ipv6.c
1 /*
2 * DCCP over IPv6
3 * Linux INET6 implementation
4 *
5 * Based on net/dccp6/ipv6.c
6 *
7 * Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 #include <linux/config.h>
16 #include <linux/module.h>
17 #include <linux/random.h>
18 #include <linux/xfrm.h>
19
20 #include <net/addrconf.h>
21 #include <net/inet_common.h>
22 #include <net/inet_hashtables.h>
23 #include <net/inet_sock.h>
24 #include <net/inet6_connection_sock.h>
25 #include <net/inet6_hashtables.h>
26 #include <net/ip6_route.h>
27 #include <net/ipv6.h>
28 #include <net/protocol.h>
29 #include <net/transp_v6.h>
30 #include <net/ip6_checksum.h>
31 #include <net/xfrm.h>
32
33 #include "dccp.h"
34 #include "ipv6.h"
35
36 /* Socket used for sending RSTs and ACKs */
37 static struct socket *dccp_v6_ctl_socket;
38
39 static void dccp_v6_ctl_send_reset(struct sk_buff *skb);
40 static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
41 struct request_sock *req);
42 static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb);
43
44 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
45
46 static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
47 static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
48
/*
 * Bind @sk to local port @snum.  Delegates to the generic connection-sock
 * port allocator, passing the IPv6-aware bind-conflict checker so that
 * v4-mapped and native v6 bindings are compared correctly.
 */
static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&dccp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}
54
55 static void dccp_v6_hash(struct sock *sk)
56 {
57 if (sk->sk_state != DCCP_CLOSED) {
58 if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
59 dccp_hash(sk);
60 return;
61 }
62 local_bh_disable();
63 __inet6_hash(&dccp_hashinfo, sk);
64 local_bh_enable();
65 }
66 }
67
/*
 * Finish a DCCP/IPv6 checksum: fold the IPv6 pseudo-header (addresses,
 * length, protocol) into @base, which already holds the partial sum over
 * the DCCP header/payload.  @dh itself is not read here.
 */
static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len,
				struct in6_addr *saddr,
				struct in6_addr *daddr,
				unsigned long base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base);
}
75
76 static __u32 dccp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
77 {
78 const struct dccp_hdr *dh = dccp_hdr(skb);
79
80 if (skb->protocol == htons(ETH_P_IPV6))
81 return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
82 skb->nh.ipv6h->saddr.s6_addr32,
83 dh->dccph_dport,
84 dh->dccph_sport);
85
86 return secure_dccp_sequence_number(skb->nh.iph->daddr,
87 skb->nh.iph->saddr,
88 dh->dccph_dport,
89 dh->dccph_sport);
90 }
91
/*
 * Connect a DCCP socket to an IPv6 (or v4-mapped IPv4) peer.
 *
 * Closely mirrors tcp_v6_connect(): validate the sockaddr, resolve any
 * flow label, special-case v4-mapped destinations by handing off to
 * dccp_v4_connect(), then do the route + xfrm lookup, bind a local port
 * and send the DCCP-Request via dccp_connect().
 *
 * Returns 0 on success or a negative errno; on failure the socket is
 * left in DCCP_CLOSED with dport/route state reset.
 */
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	/*
	 * If the application sends flow info, a non-zero flow label must
	 * refer to a label previously attached to this socket; the label
	 * also fixes the destination address.
	 */
	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 * DCCP over IPv4: a ::ffff:a.b.c.d destination is handled by the
	 * IPv4 stack; switch the af_ops and backlog handler to the mapped
	 * variants and let dccp_v4_connect() do the work.  On failure the
	 * v6 ops are restored.
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		} else {
			/* Mirror the v4 addresses as v4-mapped v6 ones. */
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_DCCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	/*
	 * With a type-0 routing header, route to the first hop listed in
	 * it and remember the real destination in @final for the lookup
	 * restore below.
	 */
	if (np->opt != NULL && np->opt->srcrt != NULL) {
		const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;

		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;

	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = xfrm_lookup(&dst, &fl, sk, 0);
	if (err < 0)
		goto failure;

	/* No source bound yet: adopt the one the route lookup chose. */
	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	ip6_dst_store(sk, dst, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt != NULL)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	inet->dport = usin->sin6_port;

	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;
	/* FIXME */
#if 0
	dp->dccps_gar = secure_dccp_v6_sequence_number(np->saddr.s6_addr32,
						       np->daddr.s6_addr32,
						       inet->sport,
						       inet->dport);
#endif
	err = dccp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
267
/*
 * ICMPv6 error handler for DCCP.
 *
 * Invoked by the IPv6 stack when an ICMPv6 error quoting one of our
 * packets arrives.  @offset is the offset of the quoted DCCP header
 * inside skb->data; @info carries the ICMP-specific datum (e.g. MTU for
 * PKT_TOOBIG, unused here).  Handles PMTU updates, errors against
 * pending connection requests, and errors during the handshake; for
 * established sockets it reports via sk_err/sk_err_soft depending on
 * IPV6_RECVERR and socket-lock state.
 */
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	__u64 seq;

	/* Look up the socket the quoted packet belonged to. */
	sk = inet6_lookup(&dccp_hashinfo, &hdr->daddr, dh->dccph_dport,
			  &hdr->saddr, dh->dccph_sport, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	/* TIME_WAIT sockets ignore ICMP errors; just drop the reference. */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	/* Only count the lock contention here; processing continues and
	 * individual cases below re-check sock_owned_by_user(). */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);
		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_DCCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;

			err = ip6_dst_lookup(sk, &dst, &fl);
			if (err) {
				sk->sk_err_soft = -err;
				goto out;
			}

			err = xfrm_lookup(&dst, &fl, sk, 0);
			if (err < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}
		} else
			dst_hold(dst);

		/* Shrink our PMTU estimate if the route's got smaller. */
		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			dccp_sync_mss(sk, dst_mtu(dst));
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	seq = DCCP_SKB_CB(skb)->dccpd_seq;
	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case DCCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
					   &hdr->daddr, &hdr->saddr,
					   inet6_iif(skb));
		if (req == NULL)
			goto out;

		/*
		 * ICMPs are not backlogged, hence we cannot get an established
		 * socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		/* Quoted sequence must match the request's ISS. */
		if (seq != dccp_rsk(req)->dreq_iss) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case DCCP_REQUESTING:
	case DCCP_RESPOND: /* Cannot happen.
			      It can, it SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/*
			 * Wake people up to see the error
			 * (see connect in sock.c)
			 */
			sk->sk_error_report(sk);
			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	/* Established (or other) states: hard error only if the user asked
	 * for it via IPV6_RECVERR and we hold the lock; otherwise soft. */
	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
404
405
/*
 * Build and transmit the DCCP-Response for a pending connection request.
 *
 * Also used as the rtx_syn_ack handler of dccp6_request_sock_ops.  If
 * @dst is NULL a route is looked up from the request's addresses,
 * honouring any received/configured source routing header.  Returns 0
 * on success (NET_XMIT_CN is treated as success) or a negative error.
 */
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
				 struct dst_entry *dst)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_DCCP;
	ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = ireq6->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;

	if (dst == NULL) {
		opt = np->opt;
		/*
		 * No socket options: if osrcrt == 2 and the request carried
		 * a source-routing header, reverse it to route the reply.
		 * The inverted header is freed at "done" below.
		 */
		if (opt == NULL &&
		    np->rxopt.bits.osrcrt == 2 &&
		    ireq6->pktopts) {
			struct sk_buff *pktopts = ireq6->pktopts;
			struct inet6_skb_parm *rxopt = IP6CB(pktopts);

			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk,
					(struct ipv6_rt_hdr *)(pktopts->nh.raw +
							       rxopt->srcrt));
		}

		/* Route via the first hop of a type-0 routing header,
		 * restoring the true destination after the lookup. */
		if (opt != NULL && opt->srcrt != NULL) {
			const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		err = xfrm_lookup(&dst, &fl, sk, 0);
		if (err < 0)
			goto done;
	}

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		struct dccp_hdr *dh = dccp_hdr(skb);

		dh->dccph_checksum = dccp_v6_check(dh, skb->len,
						   &ireq6->loc_addr,
						   &ireq6->rmt_addr,
						   csum_partial((char *)dh,
								skb->len,
								skb->csum));
		ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		if (err == NET_XMIT_CN)
			err = 0;
	}

done:
	/* Free an option block we built locally (but never np->opt). */
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
482
483 static void dccp_v6_reqsk_destructor(struct request_sock *req)
484 {
485 if (inet6_rsk(req)->pktopts != NULL)
486 kfree_skb(inet6_rsk(req)->pktopts);
487 }
488
/* Request-sock operations for DCCP over native IPv6. */
static struct request_sock_ops dccp6_request_sock_ops = {
	.family		= AF_INET6,
	.obj_size	= sizeof(struct dccp6_request_sock),
	.rtx_syn_ack	= dccp_v6_send_response,
	.send_ack	= dccp_v6_reqsk_send_ack,
	.destructor	= dccp_v6_reqsk_destructor,
	.send_reset	= dccp_v6_ctl_send_reset,
};
497
/* TIME_WAIT sock sizing for DCCPv6 (no per-family tw callbacks needed). */
static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
};
501
502 static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
503 {
504 struct ipv6_pinfo *np = inet6_sk(sk);
505 struct dccp_hdr *dh = dccp_hdr(skb);
506
507 dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr,
508 len, IPPROTO_DCCP,
509 csum_partial((char *)dh,
510 dh->dccph_doff << 2,
511 skb->csum));
512 }
513
/*
 * Send a DCCP-Reset in reply to @rxskb without an owning socket (no
 * connection / TIME_WAIT / failed handshake), transmitted through the
 * module-wide control socket.
 *
 * Never resets a Reset (loop prevention) and only replies to unicast
 * destinations.
 */
static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
	const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb;
	struct flowi fl;
	u64 seqno;

	if (rxdh->dccph_type == DCCP_PKT_RESET)
		return;

	if (!ipv6_unicast_destination(rxskb))
		return;

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */

	skb = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) +
			dccp_hdr_reset_len, GFP_ATOMIC);
	if (skb == NULL)
		return;

	skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr) +
		    dccp_hdr_reset_len);

	skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_hdr_reset_len);

	/* Swap the send and the receive. */
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_sport	= rxdh->dccph_dport;
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_reset_len / 4;
	dh->dccph_x	= 1;	/* 48-bit sequence numbers */
	dccp_hdr_reset(skb)->dccph_reset_code =
				DCCP_SKB_CB(rxskb)->dccpd_reset_code;

	/* See "8.3.1. Abnormal Termination" in draft-ietf-dccp-spec-11 */
	seqno = 0;
	if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);

	dccp_hdr_set_seq(dh, seqno);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
			 DCCP_SKB_CB(rxskb)->dccpd_seq);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
	/* NOTE(review): pseudo-header length is sizeof(*dh) only and
	 * skb->csum has not been accumulated over the built header —
	 * looks like an incomplete checksum; verify against later
	 * upstream fixes. */
	dh->dccph_checksum = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
					     sizeof(*dh), IPPROTO_DCCP,
					     skb->csum);
	fl.proto = IPPROTO_DCCP;
	fl.oif = inet6_iif(rxskb);
	fl.fl_ip_dport = dh->dccph_dport;
	fl.fl_ip_sport = dh->dccph_sport;

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
		if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
			DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
			DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(skb);
}
588
/*
 * Send a bare DCCP-Ack in reply to @rxskb via the control socket —
 * used to acknowledge packets for a request sock that has no full
 * socket yet (see dccp_v6_reqsk_send_ack).  Drops the skb silently
 * if no route can be found.
 */
static void dccp_v6_ctl_send_ack(struct sk_buff *rxskb)
{
	struct flowi fl;
	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
	const int dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_ack_bits);
	struct sk_buff *skb;

	skb = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) +
			dccp_hdr_ack_len, GFP_ATOMIC);
	if (skb == NULL)
		return;

	skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr) +
		    dccp_hdr_ack_len);

	skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_hdr_ack_len);

	/* Build DCCP header and checksum it. */
	dh->dccph_type	= DCCP_PKT_ACK;
	dh->dccph_sport	= rxdh->dccph_dport;	/* swap ports */
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_ack_len / 4;
	dh->dccph_x	= 1;	/* 48-bit sequence numbers */

	/* Continue the peer's sequence space: ack their seq, send at
	 * the seqno they acknowledged. */
	dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
			 DCCP_SKB_CB(rxskb)->dccpd_seq);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);

	/* FIXME: calculate checksum, IPv4 also should... */

	fl.proto = IPPROTO_DCCP;
	fl.oif = inet6_iif(rxskb);
	fl.fl_ip_dport = dh->dccph_dport;
	fl.fl_ip_sport = dh->dccph_sport;

	if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
		if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
			DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(skb);
}
642
/*
 * request_sock_ops.send_ack hook: ack on behalf of a pending request.
 * @req is unused — everything needed comes from the received skb.
 */
static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
				   struct request_sock *req)
{
	dccp_v6_ctl_send_ack(skb);
}
648
649 static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
650 {
651 const struct dccp_hdr *dh = dccp_hdr(skb);
652 const struct ipv6hdr *iph = skb->nh.ipv6h;
653 struct sock *nsk;
654 struct request_sock **prev;
655 /* Find possible connection requests. */
656 struct request_sock *req = inet6_csk_search_req(sk, &prev,
657 dh->dccph_sport,
658 &iph->saddr,
659 &iph->daddr,
660 inet6_iif(skb));
661 if (req != NULL)
662 return dccp_check_req(sk, skb, req, prev);
663
664 nsk = __inet6_lookup_established(&dccp_hashinfo,
665 &iph->saddr, dh->dccph_sport,
666 &iph->daddr, ntohs(dh->dccph_dport),
667 inet6_iif(skb));
668 if (nsk != NULL) {
669 if (nsk->sk_state != DCCP_TIME_WAIT) {
670 bh_lock_sock(nsk);
671 return nsk;
672 }
673 inet_twsk_put((struct inet_timewait_sock *)nsk);
674 return NULL;
675 }
676
677 return sk;
678 }
679
/*
 * Handle a DCCP-Request arriving on a listening IPv6 socket: allocate a
 * request sock, record addresses and received packet options, pick the
 * ISS and send the DCCP-Response.
 *
 * Returns 0 on success; -1 tells the caller to generate a Reset with
 * the code stored in the skb's control block.
 */
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet_request_sock *ireq;
	/* NOTE(review): stack-local dccp_sock handed to dccp_openreq_init
	 * — presumably only read as a defaults template there; confirm it
	 * is fully initialized by that helper before any field is read. */
	struct dccp_sock dp;
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet6_request_sock *ireq6;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
	__u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;

	/* v4-mapped socket: let the IPv4 path handle it. */
	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (dccp_bad_service_code(sk, service)) {
		reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(sk->sk_prot->rsk_prot);
	if (req == NULL)
		goto drop;

	/* FIXME: process options */

	dccp_openreq_init(req, &dp, skb);

	ireq6 = inet6_rsk(req);
	ireq = inet_rsk(req);
	ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
	req->rcv_wnd	= 100; /* Fake, option parsing will get the
				  right value */
	ireq6->pktopts	= NULL;

	/* Keep the request skb around if the application asked for any
	 * ancillary data that must be replayed on the child socket. */
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		ireq6->pktopts = skb;
	}
	ireq6->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq6->iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
	 * dccp_create_openreq_child.
	 */
	dreq = dccp_rsk(req);
	dreq->dreq_isr	   = dcb->dccpd_seq;
	dreq->dreq_iss	   = dccp_v6_init_sequence(sk, skb);
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req, NULL))
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
	dcb->dccpd_reset_code = reset_code;
	return -1;
}
766
/*
 * Create the child socket for an accepted connection request.
 *
 * For v4-mapped requests the IPv4 helper builds the child and this
 * function only fixes up the IPv6 bookkeeping (mapped addresses, mapped
 * af_ops).  For native IPv6, a route is found if @dst is NULL, the
 * child is created, addresses/options are copied over, and the child is
 * hashed and inherits the listener's port.  Returns the new socket or
 * NULL on failure (accept-queue overflow, no route, allocation failure).
 */
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct inet_sock *newinet;
	struct dccp_sock *newdp;
	struct dccp6_sock *newdp6;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newdp = dccp_sk(newsk);
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		/* Inherit listener's IPv6 state wholesale, then override
		 * the per-connection fields below. */
		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = skb->nh.ipv6h->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	/* No socket options configured: optionally derive a reply route
	 * by inverting the source-routing header received with the
	 * request (freed at "out" / end of function if we built it). */
	if (np->rxopt.bits.osrcrt == 2 && opt == NULL && ireq6->pktopts) {
		const struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts);

		if (rxopt->srcrt)
			opt = ipv6_invert_rthdr(sk,
				(struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw +
						       rxopt->srcrt));
	}

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_DCCP;
		ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
		/* Route via the first hop of a type-0 routing header,
		 * restoring the real destination after the lookup. */
		if (opt != NULL && opt->srcrt != NULL) {
			const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	ip6_dst_store(newsk, dst, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newdp = dccp_sk(newsk);
	newnp = inet6_sk(newsk);

	/* Inherit listener's IPv6 state, then set connection specifics. */
	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
	newsk->sk_bound_dev_if = ireq6->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq6->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
		kfree_skb(ireq6->pktopts);
		ireq6->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

	/*
	 * Clone native IPv6 options from listening socket (if any)
	 *
	 * Yes, keeping reference count would be much more clever, but we make
	 * one more one thing there: reattach optmem to newsk.
	 */
	if (opt != NULL) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt != NULL)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

	__inet6_hash(&dccp_hashinfo, newsk);
	inet_inherit_port(&dccp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}
952
/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 *
 * Main receive routine for a locked socket: dispatches the packet to
 * the established fast path, the LISTEN demultiplexer, or the generic
 * state machine, sending a Reset when any of them reject the packet.
 * Always consumes @skb and returns 0.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, dccp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   dccp_v6_hnd_req and dccp_v6_ctl_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb, 0))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv is currently
	 * called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		return 0;
	}

	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v6_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;
		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb != NULL)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	return 0;

reset:
	dccp_v6_ctl_send_reset(skb);
discard:
	if (opt_skb != NULL)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
}
1038
/*
 * Protocol entry point: receive a DCCP/IPv6 packet from the IP layer.
 *
 * Implements steps 1-2 of the DCCP packet processing pseudo-code:
 * validate the header, look up the owning socket and hand the packet
 * to it, generating Reset(No Connection) for unmatched non-Reset
 * packets.  Returns 0 normally, -1 when sk_receive_skb reports drop.
 */
static int dccp_v6_rcv(struct sk_buff **pskb)
{
	const struct dccp_hdr *dh;
	struct sk_buff *skb = *pskb;
	struct sock *sk;

	/* Step 1: Check header basics: */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	dh = dccp_hdr(skb);

	/* Cache sequence/ack numbers in the skb's control block for the
	 * rest of the receive path. */
	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(skb);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	if (dccp_packet_without_ack(skb))
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
	else
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

	/* Step 2:
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
			    dh->dccph_sport,
			    &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
			    inet6_iif(skb));
	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk == NULL)
		goto no_dccp_socket;

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	/* Deliver (directly or via backlog); releases the sk reference. */
	return sk_receive_skb(sk, skb) ? -1 : 0;

no_dccp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	Generate Reset(No Connection) unless P.type == Reset
	 *	Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v6_ctl_send_reset(skb);
	}
discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	/* Drop the tw reference, then treat as "no socket". */
	inet_twsk_put((struct inet_timewait_sock *)sk);
	goto no_dccp_socket;
}
1119
/* Address-family operations for native DCCP over IPv6. */
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
	.queue_xmit	= inet6_csk_xmit,
	.send_check	= dccp_v6_send_check,
	.rebuild_header	= inet6_sk_rebuild_header,
	.conn_request	= dccp_v6_conn_request,
	.syn_recv_sock	= dccp_v6_request_recv_sock,
	.net_header_len	= sizeof(struct ipv6hdr),
	.setsockopt	= ipv6_setsockopt,
	.getsockopt	= ipv6_getsockopt,
	.addr2sockaddr	= inet6_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in6)
};
1132
/*
 * DCCP over IPv4 via INET6 API
 *
 * Used for v4-mapped sockets: IPv4 transmit/header handling combined
 * with the IPv6 sockopt and sockaddr interface.
 */
static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.queue_xmit	= ip_queue_xmit,
	.send_check	= dccp_v4_send_check,
	.rebuild_header	= inet_sk_rebuild_header,
	.conn_request	= dccp_v6_conn_request,
	.syn_recv_sock	= dccp_v6_request_recv_sock,
	.net_header_len	= sizeof(struct iphdr),
	.setsockopt	= ipv6_setsockopt,
	.getsockopt	= ipv6_getsockopt,
	.addr2sockaddr	= inet6_csk_addr2sockaddr,
	.sockaddr_len	= sizeof(struct sockaddr_in6)
};
1148
1149 /* NOTE: A lot of things set to zero explicitly by call to
1150 * sk_alloc() so need not be done here.
1151 */
1152 static int dccp_v6_init_sock(struct sock *sk)
1153 {
1154 static __u8 dccp_v6_ctl_sock_initialized;
1155 int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
1156
1157 if (err == 0) {
1158 if (unlikely(!dccp_v6_ctl_sock_initialized))
1159 dccp_v6_ctl_sock_initialized = 1;
1160 inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1161 }
1162
1163 return err;
1164 }
1165
/*
 * Tear down the DCCP-specific state, then the generic IPv6 socket state.
 * NOTE(review): dccp_destroy_sock()'s return value is discarded — confirm
 * that is intentional.
 */
static int dccp_v6_destroy_sock(struct sock *sk)
{
	dccp_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}
1171
/*
 * Protocol operations for AF_INET6 SOCK_DCCP sockets.  Most entries are
 * shared with DCCPv4 (the generic dccp_* handlers); the v6-specific ones
 * are connect, backlog_rcv, hash, get_port, init and destroy.
 */
static struct proto dccp_v6_prot = {
	.name = "DCCPv6",
	.owner = THIS_MODULE,
	.close = dccp_close,
	.connect = dccp_v6_connect,
	.disconnect = dccp_disconnect,
	.ioctl = dccp_ioctl,
	.init = dccp_v6_init_sock,
	.setsockopt = dccp_setsockopt,
	.getsockopt = dccp_getsockopt,
	.sendmsg = dccp_sendmsg,
	.recvmsg = dccp_recvmsg,
	.backlog_rcv = dccp_v6_do_rcv,
	.hash = dccp_v6_hash,
	.unhash = dccp_unhash,
	.accept = inet_csk_accept,
	.get_port = dccp_v6_get_port,
	.shutdown = dccp_shutdown,
	.destroy = dccp_v6_destroy_sock,
	.orphan_count = &dccp_orphan_count,
	.max_header = MAX_DCCP_HEADER,
	.obj_size = sizeof(struct dccp6_sock),
	.rsk_prot = &dccp6_request_sock_ops,
	.twsk_prot = &dccp6_timewait_sock_ops,
};
1197
/*
 * IPv6 input-path hook for IPPROTO_DCCP packets.  INET6_PROTO_NOPOLICY:
 * the stack skips its xfrm policy check because dccp_v6_rcv() calls
 * xfrm6_policy_check() itself.  INET6_PROTO_FINAL presumably marks this
 * as a terminal protocol in the demux chain — confirm against the inet6
 * protocol registration code.
 */
static struct inet6_protocol dccp_v6_protocol = {
	.handler = dccp_v6_rcv,
	.err_handler = dccp_v6_err,
	.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
1203
/*
 * Socket-layer operations for AF_INET6 DCCP sockets: generic inet6/
 * sock_common helpers except for the DCCP-specific poll and listen.
 * socketpair, mmap and sendpage are explicitly unsupported.
 */
static struct proto_ops inet6_dccp_ops = {
	.family = PF_INET6,
	.owner = THIS_MODULE,
	.release = inet6_release,
	.bind = inet6_bind,
	.connect = inet_stream_connect,
	.socketpair = sock_no_socketpair,
	.accept = inet_accept,
	.getname = inet6_getname,
	.poll = dccp_poll,
	.ioctl = inet6_ioctl,
	.listen = inet_dccp_listen,
	.shutdown = inet_shutdown,
	.setsockopt = sock_common_setsockopt,
	.getsockopt = sock_common_getsockopt,
	.sendmsg = inet_sendmsg,
	.recvmsg = sock_common_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
1224
/*
 * Registration record tying SOCK_DCCP/IPPROTO_DCCP on PF_INET6 to
 * dccp_v6_prot and inet6_dccp_ops.  capability == -1 presumably means no
 * special capability is required to create such sockets — confirm
 * against inet6_create().
 */
static struct inet_protosw dccp_v6_protosw = {
	.type = SOCK_DCCP,
	.protocol = IPPROTO_DCCP,
	.prot = &dccp_v6_prot,
	.ops = &inet6_dccp_ops,
	.capability = -1,
	.flags = INET_PROTOSW_ICSK,
};
1233
/*
 * Module init: register the proto with the socket layer, hook into the
 * IPv6 input path, expose the SOCK_DCCP protosw, and create the control
 * socket used for sending RSTs and ACKs.  The error labels unwind in
 * reverse registration order; out_unregister_protosw intentionally falls
 * through into out_unregister_proto.
 */
static int __init dccp_v6_init(void)
{
	int err = proto_register(&dccp_v6_prot, 1);

	if (err != 0)
		goto out;

	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	if (err != 0)
		goto out_unregister_proto;

	inet6_register_protosw(&dccp_v6_protosw);

	err = inet_csk_ctl_sock_create(&dccp_v6_ctl_socket, PF_INET6,
				       SOCK_DCCP, IPPROTO_DCCP);
	if (err != 0)
		goto out_unregister_protosw;
out:
	return err;
out_unregister_protosw:
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
	/* fallthrough from out_unregister_protosw is intentional */
	proto_unregister(&dccp_v6_prot);
	goto out;
}
1260
/*
 * Module unload: undo the registrations performed by dccp_v6_init().
 * NOTE(review): dccp_v6_ctl_socket, created by inet_csk_ctl_sock_create()
 * in dccp_v6_init(), is not released here — confirm whether the control
 * socket needs an explicit release on unload.
 */
static void __exit dccp_v6_exit(void)
{
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
}
1267
module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly.  Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6");
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");