net/dccp/ipv6.c
/*
 *  DCCP over IPv6
 *  Linux INET6 implementation
 *
 *  Based on net/dccp6/ipv6.c
 *
 *  Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/xfrm.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>

#include "dccp.h"
#include "ipv6.h"
#include "feat.h"

/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */

static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;

static void dccp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != DCCP_CLOSED) {
		if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
			inet_hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
					  const struct in6_addr *saddr,
					  const struct in6_addr *daddr)
{
	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}

static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_hdr *dh = dccp_hdr(skb);

	dccp_csum_outgoing(skb);
	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
}

static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					     ipv6_hdr(skb)->saddr.s6_addr32,
					     dccp_hdr(skb)->dccph_dport,
					     dccp_hdr(skb)->dccph_sport);
}

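/*
 * ICMPv6 error handler: look up the affected socket, then handle
 * redirects, path-MTU changes and hard errors reported against it.
 */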
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
	struct dccp_sock *dp;
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	__u64 seq;
	struct net *net = dev_net(skb->dev);

	if (skb->len < offset + sizeof(*dh) ||
	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	sk = inet6_lookup(net, &dccp_hashinfo,
			  &hdr->daddr, dh->dccph_dport,
			  &hdr->saddr, dh->dccph_sport, inet6_iif(skb));

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	dp = dccp_sk(sk);
	seq = dccp_hdr_seq(dh);
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		dst = inet6_csk_update_pmtu(sk, ntohl(info));
		if (!dst)
			goto out;

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
			dccp_sync_mss(sk, dst_mtu(dst));
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case DCCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
					   &hdr->daddr, &hdr->saddr,
					   inet6_iif(skb));
		if (req == NULL)
			goto out;

		/*
		 * ICMPs are not backlogged, hence we cannot get an established
		 * socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (!between48(seq, dccp_rsk(req)->dreq_iss,
				    dccp_rsk(req)->dreq_gss)) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case DCCP_REQUESTING:
	case DCCP_RESPOND:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/*
			 * Wake people up to see the error
			 * (see connect in sock.c)
			 */
			sk->sk_error_report(sk);
			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

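/*
 * Build and send the DCCP-Response answering a connection request,
 * routed via a flow constructed from the request_sock.
 */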
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	int err = -1;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = ireq->ir_v6_rmt_addr;
	fl6.saddr = ireq->ir_v6_loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = ireq->ir_iif;
	fl6.fl6_dport = ireq->ir_rmt_port;
	fl6.fl6_sport = htons(ireq->ir_num);
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		struct dccp_hdr *dh = dccp_hdr(skb);

		dh->dccph_checksum = dccp_v6_csum_finish(skb,
							 &ireq->ir_v6_loc_addr,
							 &ireq->ir_v6_rmt_addr);
		fl6.daddr = ireq->ir_v6_rmt_addr;
		err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	dst_release(dst);
	return err;
}

static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
	kfree_skb(inet_rsk(req)->pktopts);
}

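/*
 * Send a Reset in reply to a packet for which no usable socket state
 * exists, using the per-net control socket (never Reset-on-Reset).
 */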
static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
	const struct ipv6hdr *rxip6h;
	struct sk_buff *skb;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(rxskb)->dev);
	struct sock *ctl_sk = net->dccp.v6_ctl_sk;
	struct dst_entry *dst;

	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
		return;

	if (!ipv6_unicast_destination(rxskb))
		return;

	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
	if (skb == NULL)
		return;

	rxip6h = ipv6_hdr(rxskb);
	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
							    &rxip6h->daddr);

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = rxip6h->saddr;
	fl6.saddr = rxip6h->daddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.flowi6_oif = inet6_iif(rxskb);
	fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
	fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
	security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));

	/* sk = NULL, but it is safe for now. RST socket required. */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(skb, dst);
		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(skb);
}

static struct request_sock_ops dccp6_request_sock_ops = {
	.family = AF_INET6,
	.obj_size = sizeof(struct dccp6_request_sock),
	.rtx_syn_ack = dccp_v6_send_response,
	.send_ack = dccp_reqsk_send_ack,
	.destructor = dccp_v6_reqsk_destructor,
	.send_reset = dccp_v6_ctl_send_reset,
	.syn_ack_timeout = dccp_syn_ack_timeout,
};

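/*
 * For a packet arriving on a listening socket, find the matching pending
 * request or established child socket; fall back to the listener itself.
 */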
static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet6_csk_search_req(sk, &prev,
							dh->dccph_sport,
							&iph->saddr,
							&iph->daddr,
							inet6_iif(skb));
	if (req != NULL)
		return dccp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
					 &iph->saddr, dh->dccph_sport,
					 &iph->daddr, ntohs(dh->dccph_dport),
					 inet6_iif(skb));
	if (nsk != NULL) {
		if (nsk->sk_state != DCCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

	return sk;
}

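/*
 * Handle a DCCP-Request on a listening socket: validate the service code,
 * allocate and initialise a request_sock, then send the Response.
 */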
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		return 0;	/* discard, don't send a reset here */

	if (dccp_bad_service_code(sk, service)) {
		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet_reqsk_alloc(&dccp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
		goto drop_and_free;

	dreq = dccp_rsk(req);
	if (dccp_parse_options(sk, dreq, skb))
		goto drop_and_free;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq = inet_rsk(req);
	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 *   Setting S.SWL/S.SWH is deferred to dccp_create_openreq_child().
	 */
	dreq->dreq_isr = dcb->dccpd_seq;
	dreq->dreq_gsr = dreq->dreq_isr;
	dreq->dreq_iss = dccp_v6_init_sequence(skb);
	dreq->dreq_gss = dreq->dreq_iss;
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req))
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
	return -1;
}

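/*
 * Create the child socket once the handshake completes, covering both
 * native IPv6 and v6-mapped IPv4 (ETH_P_IP) requests.
 */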
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct inet_sock *newinet;
	struct dccp6_sock *newdp6;
	struct sock *newsk;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p, final;
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_DCCP;
		fl6.daddr = ireq->ir_v6_rmt_addr;
		final_p = fl6_update_dst(&fl6, np->opt, &final);
		fl6.saddr = ireq->ir_v6_loc_addr;
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		fl6.fl6_dport = ireq->ir_rmt_port;
		fl6.fl6_sport = htons(ireq->ir_num);
		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

		dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
		if (IS_ERR(dst))
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	__ip6_dst_store(newsk, dst, NULL, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/*
	 * Clone native IPv6 options from the listening socket (if any)
	 *
	 * Yes, keeping a reference count would be much cleverer, but we do
	 * one more thing here: reattach optmem to newsk.
	 */
	if (np->opt != NULL)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt != NULL)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		dccp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives, goes to the IPv4
	   receive handler and is backlogged. From the backlog it always
	   ends up here. Kerboom...
	   Fortunately, dccp_rcv_established and rcv_established handle
	   them correctly, but that is not the case with dccp_v6_hnd_req
	   and dccp_v6_ctl_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv is currently
	 * called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it not affect IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options received in the last packet enqueued
	   by tcp. Feel free to propose a better solution.
	   --ANK (980728)
	 */
	if (np->rxopt.all)
		/*
		 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
		 * (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
		 */
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb) {
			/* XXX This is where we would goto ipv6_pktoptions. */
			__kfree_skb(opt_skb);
		}
		return 0;
	}

	/*
	 * Step 3: Process LISTEN state
	 *    If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies. Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8. This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
	 *	      Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 *
	 * NOTE: the check for the packet types is done in
	 *	 dccp_rcv_state_process
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v6_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;
		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb != NULL)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb) {
		/* XXX This is where we would goto ipv6_pktoptions. */
		__kfree_skb(opt_skb);
	}
	return 0;

reset:
	dccp_v6_ctl_send_reset(sk, skb);
discard:
	if (opt_skb != NULL)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
}

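/*
 * Protocol entry point for received DCCPv6 packets: validate the header
 * and checksum, look up the owning socket and deliver (or send a Reset).
 */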
static int dccp_v6_rcv(struct sk_buff *skb)
{
	const struct dccp_hdr *dh;
	struct sock *sk;
	int min_cov;

	/* Step 1: Check header basics */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	/* Step 1: If header checksum is incorrect, drop packet and return. */
	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr)) {
		DCCP_WARN("dropped packet with invalid checksum\n");
		goto discard_it;
	}

	dh = dccp_hdr(skb);

	DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	if (dccp_packet_without_ack(skb))
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
	else
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

	/* Step 2:
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet6_lookup_skb(&dccp_hashinfo, skb,
				dh->dccph_sport, dh->dccph_dport);
	/*
	 * Step 2:
	 *	If no socket ...
	 */
	if (sk == NULL) {
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");
		goto no_dccp_socket;
	}

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
		inet_twsk_put(inet_twsk(sk));
		goto no_dccp_socket;
	}

	/*
	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
	 */
	min_cov = dccp_sk(sk)->dccps_pcrlen;
	if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
			      dh->dccph_cscov, min_cov);
		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	return sk_receive_skb(sk, skb, 1) ? -1 : 0;

no_dccp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v6_ctl_send_reset(sk, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;
}

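/*
 * Active open: set up the route and addresses for a client socket and
 * kick off the handshake; v4-mapped destinations go via dccp_v4_connect().
 */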
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 * DCCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		}
		ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
		ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &sk->sk_v6_rcv_saddr);

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_DCCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt != NULL)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	inet->inet_dport = usin->sin6_port;

	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;

	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
						      sk->sk_v6_daddr.s6_addr32,
						      inet->inet_sport,
						      inet->inet_dport);
	err = dccp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
	.queue_xmit = inet6_csk_xmit,
	.send_check = dccp_v6_send_check,
	.rebuild_header = inet6_sk_rebuild_header,
	.conn_request = dccp_v6_conn_request,
	.syn_recv_sock = dccp_v6_request_recv_sock,
	.net_header_len = sizeof(struct ipv6hdr),
	.setsockopt = ipv6_setsockopt,
	.getsockopt = ipv6_getsockopt,
	.addr2sockaddr = inet6_csk_addr2sockaddr,
	.sockaddr_len = sizeof(struct sockaddr_in6),
	.bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/*
 * DCCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.queue_xmit = ip_queue_xmit,
	.send_check = dccp_v4_send_check,
	.rebuild_header = inet_sk_rebuild_header,
	.conn_request = dccp_v6_conn_request,
	.syn_recv_sock = dccp_v6_request_recv_sock,
	.net_header_len = sizeof(struct iphdr),
	.setsockopt = ipv6_setsockopt,
	.getsockopt = ipv6_getsockopt,
	.addr2sockaddr = inet6_csk_addr2sockaddr,
	.sockaddr_len = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
	static __u8 dccp_v6_ctl_sock_initialized;
	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

	if (err == 0) {
		if (unlikely(!dccp_v6_ctl_sock_initialized))
			dccp_v6_ctl_sock_initialized = 1;
		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
	}

	return err;
}

static void dccp_v6_destroy_sock(struct sock *sk)
{
	dccp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size = sizeof(struct dccp6_timewait_sock),
};

static struct proto dccp_v6_prot = {
	.name = "DCCPv6",
	.owner = THIS_MODULE,
	.close = dccp_close,
	.connect = dccp_v6_connect,
	.disconnect = dccp_disconnect,
	.ioctl = dccp_ioctl,
	.init = dccp_v6_init_sock,
	.setsockopt = dccp_setsockopt,
	.getsockopt = dccp_getsockopt,
	.sendmsg = dccp_sendmsg,
	.recvmsg = dccp_recvmsg,
	.backlog_rcv = dccp_v6_do_rcv,
	.hash = dccp_v6_hash,
	.unhash = inet_unhash,
	.accept = inet_csk_accept,
	.get_port = inet_csk_get_port,
	.shutdown = dccp_shutdown,
	.destroy = dccp_v6_destroy_sock,
	.orphan_count = &dccp_orphan_count,
	.max_header = MAX_DCCP_HEADER,
	.obj_size = sizeof(struct dccp6_sock),
	.slab_flags = SLAB_DESTROY_BY_RCU,
	.rsk_prot = &dccp6_request_sock_ops,
	.twsk_prot = &dccp6_timewait_sock_ops,
	.h.hashinfo = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_dccp_setsockopt,
	.compat_getsockopt = compat_dccp_getsockopt,
#endif
};

static const struct inet6_protocol dccp_v6_protocol = {
	.handler = dccp_v6_rcv,
	.err_handler = dccp_v6_err,
	.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static const struct proto_ops inet6_dccp_ops = {
	.family = PF_INET6,
	.owner = THIS_MODULE,
	.release = inet6_release,
	.bind = inet6_bind,
	.connect = inet_stream_connect,
	.socketpair = sock_no_socketpair,
	.accept = inet_accept,
	.getname = inet6_getname,
	.poll = dccp_poll,
	.ioctl = inet6_ioctl,
	.listen = inet_dccp_listen,
	.shutdown = inet_shutdown,
	.setsockopt = sock_common_setsockopt,
	.getsockopt = sock_common_getsockopt,
	.sendmsg = inet_sendmsg,
	.recvmsg = sock_common_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw dccp_v6_protosw = {
	.type = SOCK_DCCP,
	.protocol = IPPROTO_DCCP,
	.prot = &dccp_v6_prot,
	.ops = &inet6_dccp_ops,
	.flags = INET_PROTOSW_ICSK,
};

static int __net_init dccp_v6_init_net(struct net *net)
{
	if (dccp_hashinfo.bhash == NULL)
		return -ESOCKTNOSUPPORT;

	return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6,
				    SOCK_DCCP, IPPROTO_DCCP, net);
}

static void __net_exit dccp_v6_exit_net(struct net *net)
{
	inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}

static struct pernet_operations dccp_v6_ops = {
	.init = dccp_v6_init_net,
	.exit = dccp_v6_exit_net,
};

static int __init dccp_v6_init(void)
{
	int err = proto_register(&dccp_v6_prot, 1);

	if (err != 0)
		goto out;

	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	if (err != 0)
		goto out_unregister_proto;

	inet6_register_protosw(&dccp_v6_protosw);

	err = register_pernet_subsys(&dccp_v6_ops);
	if (err != 0)
		goto out_destroy_ctl_sock;
out:
	return err;

out_destroy_ctl_sock:
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
	proto_unregister(&dccp_v6_prot);
	goto out;
}

static void __exit dccp_v6_exit(void)
{
	unregister_pernet_subsys(&dccp_v6_ops);
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
}

module_init(dccp_v6_init);
module_exit(dccp_v6_exit);

/*
 * __stringify doesn't like enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33)
 * values directly. Also cover the case where the protocol is not specified,
 * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");