/*
 *  DCCP over IPv6
 *  Linux INET6 implementation
 *
 *  Based on net/dccp6/ipv6.c
 *
 *  Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/xfrm.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/xfrm.h>

#include "dccp.h"
#include "ipv6.h"

static void dccp_v6_ctl_send_reset(struct sk_buff *skb);
static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
                                   struct request_sock *req);
static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb);

static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
{
        return inet_csk_get_port(&dccp_hashinfo, sk, snum,
                                 inet6_csk_bind_conflict);
}

static void dccp_v6_hash(struct sock *sk)
{
        if (sk->sk_state != DCCP_CLOSED) {
                if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
                        dccp_prot.hash(sk);
                        return;
                }
                local_bh_disable();
                __inet6_hash(&dccp_hashinfo, sk);
                local_bh_enable();
        }
}

static inline u16 dccp_v6_check(struct dccp_hdr *dh, int len,
                                struct in6_addr *saddr,
                                struct in6_addr *daddr,
                                unsigned long base)
{
        return csum_ipv6_magic(saddr, daddr, len, IPPROTO_DCCP, base);
}

static __u32 dccp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);

        if (skb->protocol == htons(ETH_P_IPV6))
                return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
                                                    skb->nh.ipv6h->saddr.s6_addr32,
                                                    dh->dccph_dport,
                                                    dh->dccph_sport);
        else
                return secure_dccp_sequence_number(skb->nh.iph->daddr,
                                                   skb->nh.iph->saddr,
                                                   dh->dccph_dport,
                                                   dh->dccph_sport);
}

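/*
 * Set up a client connection: resolve flow labels and v4-mapped destinations,
 * route the flow (including any type 0 routing header), pick a source address
 * and start the DCCP handshake via dccp_connect().
 */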
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                           int addr_len)
{
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p = NULL, final;
        struct flowi fl;
        struct dst_entry *dst;
        int addr_type;
        int err;

        dp->dccps_role = DCCP_ROLE_CLIENT;

        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;

        memset(&fl, 0, sizeof(fl));

        if (np->sndflow) {
                fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
                IP6_ECN_flow_init(fl.fl6_flowlabel);
                if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
                        flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                        ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
                        fl6_sock_release(flowlabel);
                }
        }

        /*
         * connect() to INADDR_ANY means loopback (BSD'ism).
         */
        if (ipv6_addr_any(&usin->sin6_addr))
                usin->sin6_addr.s6_addr[15] = 0x1;

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if (addr_type & IPV6_ADDR_MULTICAST)
                return -ENETUNREACH;

        if (addr_type & IPV6_ADDR_LINKLOCAL) {
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    usin->sin6_scope_id) {
                        /* If interface is set while binding, indices
                         * must coincide.
                         */
                        if (sk->sk_bound_dev_if &&
                            sk->sk_bound_dev_if != usin->sin6_scope_id)
                                return -EINVAL;

                        sk->sk_bound_dev_if = usin->sin6_scope_id;
                }

                /* Connect to link-local address requires an interface */
                if (!sk->sk_bound_dev_if)
                        return -EINVAL;
        }

        ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
        np->flow_label = fl.fl6_flowlabel;

        /*
         * DCCP over IPv4
         */
        if (addr_type == IPV6_ADDR_MAPPED) {
                u32 exthdrlen = icsk->icsk_ext_hdr_len;
                struct sockaddr_in sin;

                SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

                if (__ipv6_only_sock(sk))
                        return -ENETUNREACH;

                sin.sin_family = AF_INET;
                sin.sin_port = usin->sin6_port;
                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

                icsk->icsk_af_ops = &dccp_ipv6_mapped;
                sk->sk_backlog_rcv = dccp_v4_do_rcv;

                err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

                if (err) {
                        icsk->icsk_ext_hdr_len = exthdrlen;
                        icsk->icsk_af_ops = &dccp_ipv6_af_ops;
                        sk->sk_backlog_rcv = dccp_v6_do_rcv;
                        goto failure;
                } else {
                        ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->saddr);
                        ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->rcv_saddr);
                }

                return err;
        }

        if (!ipv6_addr_any(&np->rcv_saddr))
                saddr = &np->rcv_saddr;

        fl.proto = IPPROTO_DCCP;
        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
        ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
        fl.oif = sk->sk_bound_dev_if;
        fl.fl_ip_dport = usin->sin6_port;
        fl.fl_ip_sport = inet->sport;

        if (np->opt && np->opt->srcrt) {
                struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
                ipv6_addr_copy(&final, &fl.fl6_dst);
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                final_p = &final;
        }

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto failure;
        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);

        if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
                goto failure;

        if (saddr == NULL) {
                saddr = &fl.fl6_src;
                ipv6_addr_copy(&np->rcv_saddr, saddr);
        }

        /* set the source address */
        ipv6_addr_copy(&np->saddr, saddr);
        inet->rcv_saddr = LOOPBACK4_IPV6;

        ip6_dst_store(sk, dst, NULL);

        icsk->icsk_ext_hdr_len = 0;
        if (np->opt)
                icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
                                          np->opt->opt_nflen);

        inet->dport = usin->sin6_port;

        dccp_set_state(sk, DCCP_REQUESTING);
        err = inet6_hash_connect(&dccp_death_row, sk);
        if (err)
                goto late_failure;
        /* FIXME */
#if 0
        dp->dccps_gar = secure_dccp_v6_sequence_number(np->saddr.s6_addr32,
                                                       np->daddr.s6_addr32,
                                                       inet->sport,
                                                       inet->dport);
#endif
        err = dccp_connect(sk);
        if (err)
                goto late_failure;

        return 0;

late_failure:
        dccp_set_state(sk, DCCP_CLOSED);
        __sk_dst_reset(sk);
failure:
        inet->dport = 0;
        sk->sk_route_caps = 0;
        return err;
}

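/*
 * ICMPv6 error handler: locate the socket the error refers to, handle
 * PKT_TOOBIG by re-routing and syncing the MSS, and report other errors
 * to the socket (or its pending request) according to its state.
 */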
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                        int type, int code, int offset, __u32 info)
{
        struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
        const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
        struct ipv6_pinfo *np;
        struct sock *sk;
        int err;
        __u64 seq;

        sk = inet6_lookup(&dccp_hashinfo, &hdr->daddr, dh->dccph_dport,
                          &hdr->saddr, dh->dccph_sport, skb->dev->ifindex);

        if (sk == NULL) {
                ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
                return;
        }

        if (sk->sk_state == DCCP_TIME_WAIT) {
                inet_twsk_put((struct inet_timewait_sock *)sk);
                return;
        }

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == DCCP_CLOSED)
                goto out;

        np = inet6_sk(sk);

        if (type == ICMPV6_PKT_TOOBIG) {
                struct dst_entry *dst = NULL;

                if (sock_owned_by_user(sk))
                        goto out;
                if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
                        goto out;

                /* icmp should have updated the destination cache entry */
                dst = __sk_dst_check(sk, np->dst_cookie);

                if (dst == NULL) {
                        struct inet_sock *inet = inet_sk(sk);
                        struct flowi fl;

                        /* BUGGG_FUTURE: Again, it is not clear how
                           to handle the rthdr case. Ignore this complexity
                           for now.
                         */
                        memset(&fl, 0, sizeof(fl));
                        fl.proto = IPPROTO_DCCP;
                        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
                        ipv6_addr_copy(&fl.fl6_src, &np->saddr);
                        fl.oif = sk->sk_bound_dev_if;
                        fl.fl_ip_dport = inet->dport;
                        fl.fl_ip_sport = inet->sport;

                        if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }

                        if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }

                } else
                        dst_hold(dst);

                if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
                        dccp_sync_mss(sk, dst_mtu(dst));
                } /* else let the usual retransmit timer handle it */
                dst_release(dst);
                goto out;
        }

        icmpv6_err_convert(type, code, &err);

        seq = DCCP_SKB_CB(skb)->dccpd_seq;
        /* Might be for a request_sock */
        switch (sk->sk_state) {
                struct request_sock *req, **prev;
        case DCCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
                                           &hdr->daddr, &hdr->saddr,
                                           inet6_iif(skb));
                if (!req)
                        goto out;

                /* ICMPs are not backlogged, hence we cannot get
                 * an established socket here.
                 */
                BUG_TRAP(req->sk == NULL);

                if (seq != dccp_rsk(req)->dreq_iss) {
                        NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }

                inet_csk_reqsk_queue_drop(sk, req, prev);
                goto out;

        case DCCP_REQUESTING:
        case DCCP_RESPOND: /* Cannot happen.
                              It can if SYNs are crossed. --ANK */
                if (!sock_owned_by_user(sk)) {
                        DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
                        sk->sk_err = err;
                        /*
                         * Wake people up to see the error
                         * (see connect in sock.c)
                         */
                        sk->sk_error_report(sk);

                        dccp_done(sk);
                } else
                        sk->sk_err_soft = err;
                goto out;
        }

        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else
                sk->sk_err_soft = err;

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

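/*
 * Build and send the Response packet for a pending connection request:
 * route the reply (honouring any source routing options carried with the
 * request), checksum the DCCP header and hand the skb to ip6_xmit().
 */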
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
                                 struct dst_entry *dst)
{
        struct inet6_request_sock *ireq6 = inet6_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *skb;
        struct ipv6_txoptions *opt = NULL;
        struct in6_addr *final_p = NULL, final;
        struct flowi fl;
        int err = -1;

        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_DCCP;
        ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
        ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
        fl.fl6_flowlabel = 0;
        fl.oif = ireq6->iif;
        fl.fl_ip_dport = inet_rsk(req)->rmt_port;
        fl.fl_ip_sport = inet_sk(sk)->sport;

        if (dst == NULL) {
                opt = np->opt;
                if (opt == NULL &&
                    np->rxopt.bits.osrcrt == 2 &&
                    ireq6->pktopts) {
                        struct sk_buff *pktopts = ireq6->pktopts;
                        struct inet6_skb_parm *rxopt = IP6CB(pktopts);
                        if (rxopt->srcrt)
                                opt = ipv6_invert_rthdr(sk,
                                        (struct ipv6_rt_hdr *)(pktopts->nh.raw +
                                                               rxopt->srcrt));
                }

                if (opt && opt->srcrt) {
                        struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
                        ipv6_addr_copy(&final, &fl.fl6_dst);
                        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                        final_p = &final;
                }

                err = ip6_dst_lookup(sk, &dst, &fl);
                if (err)
                        goto done;
                if (final_p)
                        ipv6_addr_copy(&fl.fl6_dst, final_p);
                if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
                        goto done;
        }

        skb = dccp_make_response(sk, dst, req);
        if (skb != NULL) {
                struct dccp_hdr *dh = dccp_hdr(skb);
                dh->dccph_checksum = dccp_v6_check(dh, skb->len,
                                                   &ireq6->loc_addr,
                                                   &ireq6->rmt_addr,
                                                   csum_partial((char *)dh,
                                                                skb->len,
                                                                skb->csum));
                ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
                err = ip6_xmit(sk, skb, &fl, opt, 0);
                if (err == NET_XMIT_CN)
                        err = 0;
        }

done:
        if (opt && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        return err;
}

static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
        if (inet6_rsk(req)->pktopts != NULL)
                kfree_skb(inet6_rsk(req)->pktopts);
}

static struct request_sock_ops dccp6_request_sock_ops = {
        .family         = AF_INET6,
        .obj_size       = sizeof(struct dccp6_request_sock),
        .rtx_syn_ack    = dccp_v6_send_response,
        .send_ack       = dccp_v6_reqsk_send_ack,
        .destructor     = dccp_v6_reqsk_destructor,
        .send_reset     = dccp_v6_ctl_send_reset,
};

static struct timewait_sock_ops dccp6_timewait_sock_ops = {
        .twsk_obj_size  = sizeof(struct dccp6_timewait_sock),
};

static void dccp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct dccp_hdr *dh = dccp_hdr(skb);

        dh->dccph_checksum = csum_ipv6_magic(&np->saddr, &np->daddr,
                                             len, IPPROTO_DCCP,
                                             csum_partial((char *)dh,
                                                          dh->dccph_doff << 2,
                                                          skb->csum));
}

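/*
 * Stateless replies: dccp_v6_ctl_send_reset() builds a Reset in answer to a
 * received packet and dccp_v6_ctl_send_ack() a bare Ack, both by swapping the
 * addresses and ports of the packet they answer and transmitting without an
 * owning socket.
 */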
static void dccp_v6_ctl_send_reset(struct sk_buff *rxskb)
{
        struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
        const int dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
                                       sizeof(struct dccp_hdr_ext) +
                                       sizeof(struct dccp_hdr_reset);
        struct sk_buff *skb;
        struct flowi fl;
        u64 seqno;

        if (rxdh->dccph_type == DCCP_PKT_RESET)
                return;

        if (!ipv6_unicast_destination(rxskb))
                return;

        /*
         * We need to grab some memory, and put together an RST,
         * and then put it into the queue to be sent.
         */
        skb = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) +
                        dccp_hdr_reset_len, GFP_ATOMIC);
        if (skb == NULL)
                return;

        skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr) +
                         dccp_hdr_reset_len);

        skb->h.raw = skb_push(skb, dccp_hdr_reset_len);
        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_hdr_reset_len);

        /* Swap the send and the receive. */
        dh->dccph_type  = DCCP_PKT_RESET;
        dh->dccph_sport = rxdh->dccph_dport;
        dh->dccph_dport = rxdh->dccph_sport;
        dh->dccph_doff  = dccp_hdr_reset_len / 4;
        dh->dccph_x     = 1;
        dccp_hdr_reset(skb)->dccph_reset_code =
                                DCCP_SKB_CB(rxskb)->dccpd_reset_code;

        /* See "8.3.1. Abnormal Termination" in draft-ietf-dccp-spec-11 */
        seqno = 0;
        if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
                dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);

        dccp_hdr_set_seq(dh, seqno);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
                         DCCP_SKB_CB(rxskb)->dccpd_seq);

        memset(&fl, 0, sizeof(fl));
        ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
        ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);
        dh->dccph_checksum = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
                                             sizeof(*dh), IPPROTO_DCCP,
                                             skb->csum);
        fl.proto = IPPROTO_DCCP;
        fl.oif = inet6_iif(rxskb);
        fl.fl_ip_dport = dh->dccph_dport;
        fl.fl_ip_sport = dh->dccph_sport;

        /* sk = NULL, but it is safe for now. RST socket required. */
        if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
                if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
                        ip6_xmit(NULL, skb, &fl, NULL, 0);
                        DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
                        DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
                        return;
                }
        }

        kfree_skb(skb);
}

static void dccp_v6_ctl_send_ack(struct sk_buff *rxskb)
{
        struct flowi fl;
        struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
        const int dccp_hdr_ack_len = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_ack_bits);
        struct sk_buff *skb;

        skb = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) +
                        dccp_hdr_ack_len, GFP_ATOMIC);
        if (skb == NULL)
                return;

        skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr) +
                         dccp_hdr_ack_len);

        skb->h.raw = skb_push(skb, dccp_hdr_ack_len);
        dh = dccp_hdr(skb);
        memset(dh, 0, dccp_hdr_ack_len);

        /* Build DCCP header and checksum it. */
        dh->dccph_type  = DCCP_PKT_ACK;
        dh->dccph_sport = rxdh->dccph_dport;
        dh->dccph_dport = rxdh->dccph_sport;
        dh->dccph_doff  = dccp_hdr_ack_len / 4;
        dh->dccph_x     = 1;

        dccp_hdr_set_seq(dh, DCCP_SKB_CB(rxskb)->dccpd_ack_seq);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb),
                         DCCP_SKB_CB(rxskb)->dccpd_seq);

        memset(&fl, 0, sizeof(fl));
        ipv6_addr_copy(&fl.fl6_dst, &rxskb->nh.ipv6h->saddr);
        ipv6_addr_copy(&fl.fl6_src, &rxskb->nh.ipv6h->daddr);

        /* FIXME: calculate checksum, IPv4 also should... */

        fl.proto = IPPROTO_DCCP;
        fl.oif = inet6_iif(rxskb);
        fl.fl_ip_dport = dh->dccph_dport;
        fl.fl_ip_sport = dh->dccph_sport;

        if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
                if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
                        ip6_xmit(NULL, skb, &fl, NULL, 0);
                        DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
                        return;
                }
        }

        kfree_skb(skb);
}

static void dccp_v6_reqsk_send_ack(struct sk_buff *skb,
                                   struct request_sock *req)
{
        dccp_v6_ctl_send_ack(skb);
}

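/*
 * For a packet arriving on a listening socket, find the right target: a
 * pending request (handled via dccp_check_req), an already established child
 * socket, or the listening socket itself.
 */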
static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);
        const struct ipv6hdr *iph = skb->nh.ipv6h;
        struct sock *nsk;
        struct request_sock **prev;
        /* Find possible connection requests. */
        struct request_sock *req = inet6_csk_search_req(sk, &prev,
                                                        dh->dccph_sport,
                                                        &iph->saddr,
                                                        &iph->daddr,
                                                        inet6_iif(skb));
        if (req != NULL)
                return dccp_check_req(sk, skb, req, prev);

        nsk = __inet6_lookup_established(&dccp_hashinfo,
                                         &iph->saddr, dh->dccph_sport,
                                         &iph->daddr, ntohs(dh->dccph_dport),
                                         inet6_iif(skb));

        if (nsk != NULL) {
                if (nsk->sk_state != DCCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
                inet_twsk_put((struct inet_timewait_sock *)nsk);
                return NULL;
        }

        return sk;
}

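/*
 * Handle a Request packet on a listening socket: validate the service code,
 * allocate and initialise a request_sock, record the peer's addresses and
 * pktoptions, choose the initial sequence number and answer with a Response.
 */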
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct inet_request_sock *ireq;
        struct dccp_sock dp;
        struct request_sock *req;
        struct dccp_request_sock *dreq;
        struct inet6_request_sock *ireq6;
        struct ipv6_pinfo *np = inet6_sk(sk);
        const __u32 service = dccp_hdr_request(skb)->dccph_req_service;
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
        __u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;

        if (skb->protocol == htons(ETH_P_IP))
                return dccp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        if (dccp_bad_service_code(sk, service)) {
                reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
                goto drop;
        }
        /*
         * There are no SYN attacks on IPv6, yet...
         */
        if (inet_csk_reqsk_queue_is_full(sk))
                goto drop;

        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;

        req = inet6_reqsk_alloc(sk->sk_prot->rsk_prot);
        if (req == NULL)
                goto drop;

        /* FIXME: process options */

        dccp_openreq_init(req, &dp, skb);

        ireq6 = inet6_rsk(req);
        ireq = inet_rsk(req);
        ipv6_addr_copy(&ireq6->rmt_addr, &skb->nh.ipv6h->saddr);
        ipv6_addr_copy(&ireq6->loc_addr, &skb->nh.ipv6h->daddr);
        req->rcv_wnd = 100; /* Fake, option parsing will get the
                               right value */
        ireq6->pktopts = NULL;

        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                atomic_inc(&skb->users);
                ireq6->pktopts = skb;
        }
        ireq6->iif = sk->sk_bound_dev_if;

        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
            ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
                ireq6->iif = inet6_iif(skb);

        /*
         * Step 3: Process LISTEN state
         *
         *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
         *
         * In fact we defer setting S.GSR, S.SWL, S.SWH to
         * dccp_create_openreq_child.
         */
        dreq = dccp_rsk(req);
        dreq->dreq_isr = dcb->dccpd_seq;
        dreq->dreq_iss = dccp_v6_init_sequence(sk, skb);
        dreq->dreq_service = service;

        if (dccp_v6_send_response(sk, req, NULL))
                goto drop_and_free;

        inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
        return 0;

drop_and_free:
        reqsk_free(req);
drop:
        DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
        dcb->dccpd_reset_code = reset_code;
        return -1;
}

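/*
 * Create the child socket once the handshake completes, either via the
 * v6-mapped IPv4 path or natively: route the flow, copy addresses, clone
 * IPv6 options and pktoptions from the request, then hash the new socket
 * and inherit the listener's port.
 */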
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req,
                                              struct dst_entry *dst)
{
        struct inet6_request_sock *ireq6 = inet6_rsk(req);
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct inet_sock *newinet;
        struct dccp_sock *newdp;
        struct dccp6_sock *newdp6;
        struct sock *newsk;
        struct ipv6_txoptions *opt;

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 * v6 mapped
                 */
                newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
                if (newsk == NULL)
                        return NULL;

                newdp6 = (struct dccp6_sock *)newsk;
                newdp = dccp_sk(newsk);
                newinet = inet_sk(newsk);
                newinet->pinet6 = &newdp6->inet6;
                newnp = inet6_sk(newsk);

                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
                              newinet->daddr);

                ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
                              newinet->saddr);

                ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

                inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
                newsk->sk_backlog_rcv = dccp_v4_do_rcv;
                newnp->pktoptions = NULL;
                newnp->opt = NULL;
                newnp->mcast_oif = inet6_iif(skb);
                newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
                 * here, dccp_create_openreq_child now does this for us, see the comment in
                 * that function for the gory details. -acme
                 */

                /* This is a tricky place. Until this moment IPv4 tcp
                   worked with IPv6 icsk.icsk_af_ops.
                   Sync it now.
                 */
                dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

                return newsk;
        }

        opt = np->opt;

        if (sk_acceptq_is_full(sk))
                goto out_overflow;

        if (np->rxopt.bits.osrcrt == 2 &&
            opt == NULL && ireq6->pktopts) {
                struct inet6_skb_parm *rxopt = IP6CB(ireq6->pktopts);
                if (rxopt->srcrt)
                        opt = ipv6_invert_rthdr(sk,
                                (struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw +
                                                       rxopt->srcrt));
        }

        if (dst == NULL) {
                struct in6_addr *final_p = NULL, final;
                struct flowi fl;

                memset(&fl, 0, sizeof(fl));
                fl.proto = IPPROTO_DCCP;
                ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
                if (opt && opt->srcrt) {
                        struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
                        ipv6_addr_copy(&final, &fl.fl6_dst);
                        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                        final_p = &final;
                }
                ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
                fl.oif = sk->sk_bound_dev_if;
                fl.fl_ip_dport = inet_rsk(req)->rmt_port;
                fl.fl_ip_sport = inet_sk(sk)->sport;

                if (ip6_dst_lookup(sk, &dst, &fl))
                        goto out;

                if (final_p)
                        ipv6_addr_copy(&fl.fl6_dst, final_p);

                if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
                        goto out;
        }

        newsk = dccp_create_openreq_child(sk, req, skb);
        if (newsk == NULL)
                goto out;

        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks
         * count here, dccp_create_openreq_child now does this for us, see the
         * comment in that function for the gory details. -acme
         */

        ip6_dst_store(newsk, dst, NULL);
        newsk->sk_route_caps = dst->dev->features &
                               ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

        newdp6 = (struct dccp6_sock *)newsk;
        newinet = inet_sk(newsk);
        newinet->pinet6 = &newdp6->inet6;
        newdp = dccp_sk(newsk);
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
        ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
        ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
        newsk->sk_bound_dev_if = ireq6->iif;

        /* Now IPv6 options...

           First: no IPv4 options.
         */
        newinet->opt = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        /* Clone pktoptions received with SYN */
        newnp->pktoptions = NULL;
        if (ireq6->pktopts != NULL) {
                newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
                kfree_skb(ireq6->pktopts);
                ireq6->pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }
        newnp->opt = NULL;
        newnp->mcast_oif = inet6_iif(skb);
        newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

        /* Clone native IPv6 options from listening socket (if any)

           Yes, keeping reference count would be much more clever,
           but we do one more thing here: reattach optmem
           to newsk.
         */
        if (opt) {
                newnp->opt = ipv6_dup_options(newsk, opt);
                if (opt != np->opt)
                        sock_kfree_s(sk, opt, opt->tot_len);
        }

        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (newnp->opt)
                inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
                                                     newnp->opt->opt_flen);

        dccp_sync_mss(newsk, dst_mtu(dst));

        newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

        __inet6_hash(&dccp_hashinfo, newsk);
        inet_inherit_port(&dccp_hashinfo, sk, newsk);

        return newsk;

out_overflow:
        NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
        NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
        if (opt && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *opt_skb = NULL;

        /* Imagine: socket is IPv6. IPv4 packet arrives,
           goes to IPv4 receive handler and is backlogged.
           From backlog it always goes here. Kerboom...
           Fortunately, dccp_rcv_established and rcv_established
           handle them correctly, but it is not the case with
           dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK
         */
        if (skb->protocol == htons(ETH_P_IP))
                return dccp_v4_do_rcv(sk, skb);

        if (sk_filter(sk, skb, 0))
                goto discard;

        /*
         * socket locking is here for SMP purposes as backlog rcv
         * is currently called with bh processing disabled.
         */

        /* Do Stevens' IPV6_PKTOPTIONS.

           Yes, guys, it is the only place in our code, where we
           may make it not affecting IPv4.
           The rest of code is protocol independent,
           and I do not like idea to uglify IPv4.

           Actually, all the idea behind IPV6_PKTOPTIONS
           looks not very well thought. For now we latch
           options, received in the last packet, enqueued
           by tcp. Feel free to propose better solution.
                                                --ANK (980728)
         */
        if (np->rxopt.all)
                opt_skb = skb_clone(skb, GFP_ATOMIC);

        if (sk->sk_state == DCCP_OPEN) { /* Fast path */
                if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
                        goto reset;
                return 0;
        }

        if (sk->sk_state == DCCP_LISTEN) {
                struct sock *nsk = dccp_v6_hnd_req(sk, skb);
                if (!nsk)
                        goto discard;

                /*
                 * Queue it on the new socket if the new socket is active,
                 * otherwise we just shortcircuit this and continue with
                 * the new socket..
                 */
                if (nsk != sk) {
                        if (dccp_child_process(sk, nsk, skb))
                                goto reset;
                        if (opt_skb)
                                __kfree_skb(opt_skb);
                        return 0;
                }
        }

        if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
                goto reset;
        return 0;

reset:
        dccp_v6_ctl_send_reset(skb);
discard:
        if (opt_skb)
                __kfree_skb(opt_skb);
        kfree_skb(skb);
        return 0;
}

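/*
 * Protocol entry point for inbound IPv6 DCCP packets (Steps 1 and 2 of the
 * packet-processing pseudocode): validate the header, look up the owning
 * socket and either process the packet now or queue it on the backlog.
 */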
static int dccp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
{
        const struct dccp_hdr *dh;
        struct sk_buff *skb = *pskb;
        struct sock *sk;
        int rc;

        /* Step 1: Check header basics: */

        if (dccp_invalid_packet(skb))
                goto discard_it;

        dh = dccp_hdr(skb);

        DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(skb);
        DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

        if (dccp_packet_without_ack(skb))
                DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
        else
                DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

        /* Step 2:
         * Look up flow ID in table and get corresponding socket */
        sk = __inet6_lookup(&dccp_hashinfo, &skb->nh.ipv6h->saddr,
                            dh->dccph_sport,
                            &skb->nh.ipv6h->daddr, ntohs(dh->dccph_dport),
                            inet6_iif(skb));
        /*
         * Step 2:
         * If no socket ...
         *      Generate Reset(No Connection) unless P.type == Reset
         *      Drop packet and return
         */
        if (sk == NULL)
                goto no_dccp_socket;

        /*
         * Step 2:
         * ... or S.state == TIMEWAIT,
         *      Generate Reset(No Connection) unless P.type == Reset
         *      Drop packet and return
         */
        if (sk->sk_state == DCCP_TIME_WAIT)
                goto do_time_wait;

        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;

        if (sk_filter(sk, skb, 0))
                goto discard_and_relse;

        skb->dev = NULL;

        bh_lock_sock(sk);
        rc = 0;
        if (!sock_owned_by_user(sk))
                rc = dccp_v6_do_rcv(sk, skb);
        else
                sk_add_backlog(sk, skb);
        bh_unlock_sock(sk);

        sock_put(sk);
        return rc ? -1 : 0;

no_dccp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;
        /*
         * Step 2:
         *      Generate Reset(No Connection) unless P.type == Reset
         *      Drop packet and return
         */
        if (dh->dccph_type != DCCP_PKT_RESET) {
                DCCP_SKB_CB(skb)->dccpd_reset_code =
                                        DCCP_RESET_CODE_NO_CONNECTION;
                dccp_v6_ctl_send_reset(skb);
        }
discard_it:

        /*
         * Discard frame
         */

        kfree_skb(skb);
        return 0;

discard_and_relse:
        sock_put(sk);
        goto discard_it;

do_time_wait:
        inet_twsk_put((struct inet_timewait_sock *)sk);
        goto no_dccp_socket;
}

static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
        .queue_xmit     = inet6_csk_xmit,
        .send_check     = dccp_v6_send_check,
        .rebuild_header = inet6_sk_rebuild_header,
        .conn_request   = dccp_v6_conn_request,
        .syn_recv_sock  = dccp_v6_request_recv_sock,
        .net_header_len = sizeof(struct ipv6hdr),
        .setsockopt     = ipv6_setsockopt,
        .getsockopt     = ipv6_getsockopt,
        .addr2sockaddr  = inet6_csk_addr2sockaddr,
        .sockaddr_len   = sizeof(struct sockaddr_in6)
};

/*
 * DCCP over IPv4 via INET6 API
 */
static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
        .queue_xmit     = ip_queue_xmit,
        .send_check     = dccp_v4_send_check,
        .rebuild_header = inet_sk_rebuild_header,
        .conn_request   = dccp_v6_conn_request,
        .syn_recv_sock  = dccp_v6_request_recv_sock,
        .net_header_len = sizeof(struct iphdr),
        .setsockopt     = ipv6_setsockopt,
        .getsockopt     = ipv6_getsockopt,
        .addr2sockaddr  = inet6_csk_addr2sockaddr,
        .sockaddr_len   = sizeof(struct sockaddr_in6)
};

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
        int err = dccp_v4_init_sock(sk);

        if (err == 0)
                inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;

        return err;
}

static int dccp_v6_destroy_sock(struct sock *sk)
{
        dccp_v4_destroy_sock(sk);
        return inet6_destroy_sock(sk);
}

static struct proto dccp_v6_prot = {
        .name           = "DCCPv6",
        .owner          = THIS_MODULE,
        .close          = dccp_close,
        .connect        = dccp_v6_connect,
        .disconnect     = dccp_disconnect,
        .ioctl          = dccp_ioctl,
        .init           = dccp_v6_init_sock,
        .setsockopt     = dccp_setsockopt,
        .getsockopt     = dccp_getsockopt,
        .sendmsg        = dccp_sendmsg,
        .recvmsg        = dccp_recvmsg,
        .backlog_rcv    = dccp_v6_do_rcv,
        .hash           = dccp_v6_hash,
        .unhash         = dccp_unhash,
        .accept         = inet_csk_accept,
        .get_port       = dccp_v6_get_port,
        .shutdown       = dccp_shutdown,
        .destroy        = dccp_v6_destroy_sock,
        .orphan_count   = &dccp_orphan_count,
        .max_header     = MAX_DCCP_HEADER,
        .obj_size       = sizeof(struct dccp6_sock),
        .rsk_prot       = &dccp6_request_sock_ops,
        .twsk_prot      = &dccp6_timewait_sock_ops,
};

static struct inet6_protocol dccp_v6_protocol = {
        .handler        = dccp_v6_rcv,
        .err_handler    = dccp_v6_err,
        .flags          = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static struct proto_ops inet6_dccp_ops = {
        .family         = PF_INET6,
        .owner          = THIS_MODULE,
        .release        = inet6_release,
        .bind           = inet6_bind,
        .connect        = inet_stream_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = inet_accept,
        .getname        = inet6_getname,
        .poll           = dccp_poll,
        .ioctl          = inet6_ioctl,
        .listen         = inet_dccp_listen,
        .shutdown       = inet_shutdown,
        .setsockopt     = sock_common_setsockopt,
        .getsockopt     = sock_common_getsockopt,
        .sendmsg        = inet_sendmsg,
        .recvmsg        = sock_common_recvmsg,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
};

static struct inet_protosw dccp_v6_protosw = {
        .type           = SOCK_DCCP,
        .protocol       = IPPROTO_DCCP,
        .prot           = &dccp_v6_prot,
        .ops            = &inet6_dccp_ops,
        .capability     = -1,
        .flags          = INET_PROTOSW_ICSK,
};

static int __init dccp_v6_init(void)
{
        int err = proto_register(&dccp_v6_prot, 1);

        if (err != 0)
                goto out;

        err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        if (err != 0)
                goto out_unregister_proto;

        inet6_register_protosw(&dccp_v6_protosw);
out:
        return err;
out_unregister_proto:
        proto_unregister(&dccp_v6_prot);
        goto out;
}

static void __exit dccp_v6_exit(void)
{
        inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
        inet6_unregister_protosw(&dccp_v6_protosw);
        proto_unregister(&dccp_v6_prot);
}

module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly. Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6");
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");