[SK_BUFF]: Introduce skb_network_offset()
[deliverable/linux.git] / net / ipv6 / exthdrs.c
/*
 *	Extension Header handling for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Andi Kleen		<ak@muc.de>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	$Id: exthdrs.c,v 1.13 2001/06/19 15:58:56 davem Exp $
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/* Changes:
 *	yoshfuji		: ensure not to overrun while parsing
 *				  tlv options.
 *	Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
 *	YOSHIFUJI Hideaki @USAGI  Register inbound extension header
 *				  handlers as inet6_protocol{}.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#ifdef CONFIG_IPV6_MIP6
#include <net/xfrm.h>
#endif

#include <asm/uaccess.h>
int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
{
	int packet_len = skb->tail - skb->nh.raw;
	struct ipv6_opt_hdr *hdr;
	int len;

	if (offset + 2 > packet_len)
		goto bad;
	hdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset);
	len = ((hdr->hdrlen + 1) << 3);

	if (offset + len > packet_len)
		goto bad;

	offset += 2;
	len -= 2;

	while (len > 0) {
		int opttype = skb->nh.raw[offset];
		int optlen;

		if (opttype == type)
			return offset;

		switch (opttype) {
		case IPV6_TLV_PAD0:
			optlen = 1;
			break;
		default:
			optlen = skb->nh.raw[offset + 1] + 2;
			if (optlen > len)
				goto bad;
			break;
		}
		offset += optlen;
		len -= optlen;
	}
	/* not_found */
 bad:
	return -1;
}
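/*
 * Editor's note (illustrative, not part of the original source): the
 * "hdrlen" field of an extension header counts 8-octet units beyond the
 * first 8 octets, which is why the length math above works out as, e.g.:
 *
 *	hdr->hdrlen == 1  =>  len = (1 + 1) << 3 = 16 octets on the wire;
 *	the first 2 octets are nexthdr/hdrlen, leaving 14 octets of TLVs,
 *	hence the "offset += 2; len -= 2;" before the TLV walk.
 */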

/*
 *	Parsing tlv encoded headers.
 *
 *	The parsing function "func" returns 1 if parsing succeeded
 *	and 0 if it failed.
 *	It MUST NOT touch skb->h.
 */

struct tlvtype_proc {
	int	type;
	int	(*func)(struct sk_buff **skbp, int offset);
};
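/*
 * Editor's sketch (hypothetical, not part of the original source): a
 * handler table entry simply pairs an option type with its parsing
 * callback, and the table is terminated by a negative type, e.g.:
 *
 *	static int my_opt_parse(struct sk_buff **skbp, int optoff)
 *	{
 *		return 1;			-- accept the option
 *	}
 *
 *	static struct tlvtype_proc my_tlv_lst[] = {
 *		{ .type = 0x42, .func = my_opt_parse },	-- hypothetical type
 *		{ -1, NULL }			-- sentinel
 *	};
 *
 * ip6_parse_tlv() walks such a table; see tlvprocdestopt_lst and
 * tlvprochopopt_lst below for the real users.
 */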

/*********************
  Generic functions
 *********************/

/* An unknown option is detected, decide what to do */

static int ip6_tlvopt_unknown(struct sk_buff **skbp, int optoff)
{
	struct sk_buff *skb = *skbp;

	switch ((skb->nh.raw[optoff] & 0xC0) >> 6) {
	case 0: /* ignore */
		return 1;

	case 1: /* drop packet */
		break;

	case 3: /* Send ICMP if not a multicast address and drop packet */
		/* Actually, this check is redundant. icmp_send
		   will recheck in any case.
		 */
		if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr))
			break;
	case 2: /* send ICMP PARM PROB regardless and drop packet */
		icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
		return 0;
	};

	kfree_skb(skb);
	return 0;
}
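/*
 * Editor's note (summarising RFC 2460, section 4.2; not part of the
 * original source): the two highest-order bits of the option type,
 * extracted above with (type & 0xC0) >> 6, tell a node what to do with an
 * option it does not recognise:
 *
 *	00  skip the option and continue processing,
 *	01  discard the packet silently,
 *	10  discard and send an ICMP Parameter Problem to the source,
 *	11  as 10, but only if the destination was not a multicast address.
 */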

/* Parse tlv encoded option header (hop-by-hop or destination) */

static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	struct tlvtype_proc *curr;
	int off = skb->h.raw - skb->nh.raw;
	int len = ((skb->h.raw[1]+1)<<3);

	if ((skb->h.raw + len) - skb->data > skb_headlen(skb))
		goto bad;

	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen = skb->nh.raw[off+1]+2;

		switch (skb->nh.raw[off]) {
		case IPV6_TLV_PAD0:
			optlen = 1;
			break;

		case IPV6_TLV_PADN:
			break;

		default: /* Other TLV code so scan list */
			if (optlen > len)
				goto bad;
			for (curr=procs; curr->type >= 0; curr++) {
				if (curr->type == skb->nh.raw[off]) {
					/* type specific length/alignment
					   checks will be performed in the
					   func(). */
					if (curr->func(skbp, off) == 0)
						return 0;
					break;
				}
			}
			if (curr->type < 0) {
				if (ip6_tlvopt_unknown(skbp, off) == 0)
					return 0;
			}
			break;
		}
		off += optlen;
		len -= optlen;
	}
	if (len == 0)
		return 1;
bad:
	kfree_skb(skb);
	return 0;
}

/*****************************
  Destination options header.
 *****************************/

#ifdef CONFIG_IPV6_MIP6
static int ipv6_dest_hao(struct sk_buff **skbp, int optoff)
{
	struct sk_buff *skb = *skbp;
	struct ipv6_destopt_hao *hao;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6hdr *ipv6h = skb->nh.ipv6h;
	struct in6_addr tmp_addr;
	int ret;

	if (opt->dsthao) {
		LIMIT_NETDEBUG(KERN_DEBUG "hao duplicated\n");
		goto discard;
	}
	opt->dsthao = opt->dst1;
	opt->dst1 = 0;

	hao = (struct ipv6_destopt_hao *)(skb->nh.raw + optoff);

	if (hao->length != 16) {
		LIMIT_NETDEBUG(
			KERN_DEBUG "hao invalid option length = %d\n", hao->length);
		goto discard;
	}

	if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
		LIMIT_NETDEBUG(
			KERN_DEBUG "hao is not an unicast addr: " NIP6_FMT "\n", NIP6(hao->addr));
		goto discard;
	}

	ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
			       (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
	if (unlikely(ret < 0))
		goto discard;

	if (skb_cloned(skb)) {
		struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
		struct inet6_skb_parm *opt2;

		if (skb2 == NULL)
			goto discard;

		opt2 = IP6CB(skb2);
		memcpy(opt2, opt, sizeof(*opt2));

		kfree_skb(skb);

		/* update all the variables used below to point at the copied skbuff */
		*skbp = skb = skb2;
		hao = (struct ipv6_destopt_hao *)(skb2->nh.raw + optoff);
		ipv6h = (struct ipv6hdr *)skb2->nh.raw;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	ipv6_addr_copy(&tmp_addr, &ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &hao->addr);
	ipv6_addr_copy(&hao->addr, &tmp_addr);

	if (skb->tstamp.tv64 == 0)
		__net_timestamp(skb);

	return 1;

 discard:
	kfree_skb(skb);
	return 0;
}
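/*
 * Editor's note (illustrative, not part of the original source): the Home
 * Address destination option of Mobile IPv6 (RFC 3775) carries the mobile
 * node's home address while the packet is sent from its care-of address.
 * ipv6_dest_hao() above swaps saddr and hao->addr, so upper layers see the
 * home address as the source; the care-of address is left behind in the
 * option, and the checksum state is downgraded because the pseudo-header
 * has effectively changed.
 */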
#endif

static struct tlvtype_proc tlvprocdestopt_lst[] = {
#ifdef CONFIG_IPV6_MIP6
	{
		.type	= IPV6_TLV_HAO,
		.func	= ipv6_dest_hao,
	},
#endif
	{-1, NULL}
};

static int ipv6_destopt_rcv(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	struct inet6_skb_parm *opt = IP6CB(skb);
#ifdef CONFIG_IPV6_MIP6
	__u16 dstbuf;
#endif
	struct dst_entry *dst;

	if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
	    !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	opt->lastopt = skb->h.raw - skb->nh.raw;
	opt->dst1 = skb->h.raw - skb->nh.raw;
#ifdef CONFIG_IPV6_MIP6
	dstbuf = opt->dst1;
#endif

	dst = dst_clone(skb->dst);
	if (ip6_parse_tlv(tlvprocdestopt_lst, skbp)) {
		dst_release(dst);
		skb = *skbp;
		skb->h.raw += ((skb->h.raw[1]+1)<<3);
		opt = IP6CB(skb);
#ifdef CONFIG_IPV6_MIP6
		opt->nhoff = dstbuf;
#else
		opt->nhoff = opt->dst1;
#endif
		return 1;
	}

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
	dst_release(dst);
	return -1;
}
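/*
 * Editor's note (illustrative, not part of the original source): on
 * success ipv6_destopt_rcv() advances skb->h.raw past the option header
 * ((h.raw[1] + 1) << 3 octets) and records in opt->nhoff the offset, from
 * the network header, of the destination options header, whose first
 * octet is the Next Header field the caller uses to continue the
 * extension-header chain.
 */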

static struct inet6_protocol destopt_protocol = {
	.handler	= ipv6_destopt_rcv,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
};

void __init ipv6_destopt_init(void)
{
	if (inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS) < 0)
		printk(KERN_ERR "ipv6_destopt_init: Could not register protocol\n");
}

/********************************
  NONE header. No data in packet.
 ********************************/

static int ipv6_nodata_rcv(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;

	kfree_skb(skb);
	return 0;
}

static struct inet6_protocol nodata_protocol = {
	.handler	= ipv6_nodata_rcv,
	.flags		= INET6_PROTO_NOPOLICY,
};

void __init ipv6_nodata_init(void)
{
	if (inet6_add_protocol(&nodata_protocol, IPPROTO_NONE) < 0)
		printk(KERN_ERR "ipv6_nodata_init: Could not register protocol\n");
}

/********************************
  Routing header.
 ********************************/

static int ipv6_rthdr_rcv(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct in6_addr *addr = NULL;
	struct in6_addr daddr;
	struct inet6_dev *idev;
	int n, i;
	struct ipv6_rt_hdr *hdr;
	struct rt0_hdr *rthdr;
	int accept_source_route = ipv6_devconf.accept_source_route;

	if (accept_source_route < 0 ||
	    ((idev = in6_dev_get(skb->dev)) == NULL)) {
		kfree_skb(skb);
		return -1;
	}
	if (idev->cnf.accept_source_route < 0) {
		in6_dev_put(idev);
		kfree_skb(skb);
		return -1;
	}

	if (accept_source_route > idev->cnf.accept_source_route)
		accept_source_route = idev->cnf.accept_source_route;

	in6_dev_put(idev);

	if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
	    !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	hdr = (struct ipv6_rt_hdr *) skb->h.raw;

	switch (hdr->type) {
#ifdef CONFIG_IPV6_MIP6
		break;
#endif
	case IPV6_SRCRT_TYPE_0:
		if (accept_source_route > 0)
			break;
		kfree_skb(skb);
		return -1;
	default:
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw);
		return -1;
	}

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr) ||
	    skb->pkt_type != PACKET_HOST) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

looped_back:
	if (hdr->segments_left == 0) {
		switch (hdr->type) {
#ifdef CONFIG_IPV6_MIP6
		case IPV6_SRCRT_TYPE_2:
			/* Silently discard a type 2 header unless it was
			 * processed by this node itself
			 */
			if (!addr) {
				IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
						 IPSTATS_MIB_INADDRERRORS);
				kfree_skb(skb);
				return -1;
			}
			break;
#endif
		default:
			break;
		}

		opt->lastopt = skb->h.raw - skb->nh.raw;
		opt->srcrt = skb->h.raw - skb->nh.raw;
		skb->h.raw += (hdr->hdrlen + 1) << 3;
		opt->dst0 = opt->dst1;
		opt->dst1 = 0;
		opt->nhoff = (&hdr->nexthdr) - skb->nh.raw;
		return 1;
	}

	switch (hdr->type) {
	case IPV6_SRCRT_TYPE_0:
		if (hdr->hdrlen & 0x01) {
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->hdrlen) - skb->nh.raw);
			return -1;
		}
		break;
#ifdef CONFIG_IPV6_MIP6
	case IPV6_SRCRT_TYPE_2:
		/* Silently discard invalid RTH type 2 */
		if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INHDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	}

	/*
	 *	This is the routing header forwarding algorithm from
	 *	RFC 2460, page 16.
	 */
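	/*
	 * Editor's note (worked example, not part of the original source):
	 * for a type 0 routing header, hdrlen is twice the number of
	 * addresses, so n = hdrlen >> 1 addresses are present.  With
	 * n == 2 and segments_left == 1, the code below computes
	 * i = n - --segments_left = 2 and picks rthdr->addr[i - 1], the
	 * last address, as the next hop to swap with daddr.
	 */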

	n = hdr->hdrlen >> 1;

	if (hdr->segments_left > n) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->segments_left) - skb->nh.raw);
		return -1;
	}

	/* We are about to mangle packet header. Be careful!
	   Do not damage packets queued somewhere.
	 */
	if (skb_cloned(skb)) {
		struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
		/* the copy is a forwarded packet */
		if (skb2 == NULL) {
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			return -1;
		}
		kfree_skb(skb);
		*skbp = skb = skb2;
		opt = IP6CB(skb2);
		hdr = (struct ipv6_rt_hdr *) skb2->h.raw;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	i = n - --hdr->segments_left;

	rthdr = (struct rt0_hdr *) hdr;
	addr = rthdr->addr;
	addr += i - 1;

	switch (hdr->type) {
#ifdef CONFIG_IPV6_MIP6
	case IPV6_SRCRT_TYPE_2:
		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
				     (xfrm_address_t *)&skb->nh.ipv6h->saddr,
				     IPPROTO_ROUTING) < 0) {
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		if (!ipv6_chk_home_addr(addr)) {
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		break;
	}

	if (ipv6_addr_is_multicast(addr)) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	ipv6_addr_copy(&daddr, addr);
	ipv6_addr_copy(addr, &skb->nh.ipv6h->daddr);
	ipv6_addr_copy(&skb->nh.ipv6h->daddr, &daddr);

	dst_release(xchg(&skb->dst, NULL));
	ip6_route_input(skb);
	if (skb->dst->error) {
		skb_push(skb, skb->data - skb->nh.raw);
		dst_input(skb);
		return -1;
	}

	if (skb->dst->dev->flags&IFF_LOOPBACK) {
		if (skb->nh.ipv6h->hop_limit <= 1) {
			IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
				    0, skb->dev);
			kfree_skb(skb);
			return -1;
		}
		skb->nh.ipv6h->hop_limit--;
		goto looped_back;
	}

	skb_push(skb, skb->data - skb->nh.raw);
	dst_input(skb);
	return -1;
}

static struct inet6_protocol rthdr_protocol = {
	.handler	= ipv6_rthdr_rcv,
	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
};

void __init ipv6_rthdr_init(void)
{
	if (inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING) < 0)
		printk(KERN_ERR "ipv6_rthdr_init: Could not register protocol\n");
};

/*
   This function inverts a received rthdr.
   NOTE: the specs only allow this to be done automatically if the
   packet is authenticated.

   I will not discuss it here (though, I am really pissed off at
   this stupid requirement making the rthdr idea useless)

   Actually, it creates severe problems for us.
   Embryonic requests have no associated sockets,
   so the user has no control over them and
   not only cannot set reply options, but
   cannot even know that someone tried to connect
   without success. :-(

   For now we need to test the engine, so I created a
   temporary (or permanent) backdoor.
   If a listening socket sets IPV6_RTHDR to 2, then we invert the header.
						   --ANK (980729)
 */

struct ipv6_txoptions *
ipv6_invert_rthdr(struct sock *sk, struct ipv6_rt_hdr *hdr)
{
	/* Received rthdr:

	   [ H1 -> H2 -> ... H_prev ]  daddr=ME

	   Inverted result:
	   [ H_prev -> ... -> H1 ] daddr = sender

	   Note that the IP output engine will rewrite this rthdr
	   by rotating it left by one addr.
	 */

	int n, i;
	struct rt0_hdr *rthdr = (struct rt0_hdr*)hdr;
	struct rt0_hdr *irthdr;
	struct ipv6_txoptions *opt;
	int hdrlen = ipv6_optlen(hdr);

	if (hdr->segments_left ||
	    hdr->type != IPV6_SRCRT_TYPE_0 ||
	    hdr->hdrlen & 0x01)
		return NULL;

	n = hdr->hdrlen >> 1;
	opt = sock_kmalloc(sk, sizeof(*opt) + hdrlen, GFP_ATOMIC);
	if (opt == NULL)
		return NULL;
	memset(opt, 0, sizeof(*opt));
	opt->tot_len = sizeof(*opt) + hdrlen;
	opt->srcrt = (void*)(opt+1);
	opt->opt_nflen = hdrlen;

	memcpy(opt->srcrt, hdr, sizeof(*hdr));
	irthdr = (struct rt0_hdr*)opt->srcrt;
	irthdr->reserved = 0;
	opt->srcrt->segments_left = n;
	for (i=0; i<n; i++)
		memcpy(irthdr->addr+i, rthdr->addr+(n-1-i), 16);
	return opt;
}
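/*
 * Editor's note (worked example, not part of the original source): for a
 * received type 0 rthdr with n == 2 and addresses [A1, A2], the loop above
 * copies them in reverse, so the inverted header carries [A2, A1] with
 * segments_left reset to n; the output path then rotates it as described
 * in the comment at the top of the function.
 */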

EXPORT_SYMBOL_GPL(ipv6_invert_rthdr);

/**********************************
  Hop-by-hop options.
 **********************************/

/* Router Alert as of RFC 2711 */

static int ipv6_hop_ra(struct sk_buff **skbp, int optoff)
{
	struct sk_buff *skb = *skbp;

	if (skb->nh.raw[optoff+1] == 2) {
		IP6CB(skb)->ra = optoff;
		return 1;
	}
	LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
		       skb->nh.raw[optoff+1]);
	kfree_skb(skb);
	return 0;
}

/* Jumbo payload */

static int ipv6_hop_jumbo(struct sk_buff **skbp, int optoff)
{
	struct sk_buff *skb = *skbp;
	u32 pkt_len;

	if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) {
		LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
			       skb->nh.raw[optoff+1]);
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
				 IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	pkt_len = ntohl(*(__be32*)(skb->nh.raw+optoff+2));
	if (pkt_len <= IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
		return 0;
	}
	if (skb->nh.ipv6h->payload_len) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
		return 0;
	}

	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
		IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	}

	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
		goto drop;

	return 1;

drop:
	kfree_skb(skb);
	return 0;
}
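/*
 * Editor's note (summarising RFC 2675; not part of the original source):
 * a Jumbo Payload option is only valid when the fixed header's payload_len
 * is zero, its 32-bit length exceeds IPV6_MAXPLEN (65535), and the option
 * sits at a 4n+2 offset so the length field is 4-byte aligned; these are
 * exactly the conditions checked in ipv6_hop_jumbo() above.
 */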

static struct tlvtype_proc tlvprochopopt_lst[] = {
	{
		.type	= IPV6_TLV_ROUTERALERT,
		.func	= ipv6_hop_ra,
	},
	{
		.type	= IPV6_TLV_JUMBO,
		.func	= ipv6_hop_jumbo,
	},
	{ -1, }
};

int ipv6_parse_hopopts(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	struct inet6_skb_parm *opt = IP6CB(skb);

	/*
	 * skb->nh.raw is equal to skb->data, and
	 * skb->h.raw - skb->nh.raw is always equal to
	 * sizeof(struct ipv6hdr) by definition of
	 * hop-by-hop options.
	 */
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
	    !pskb_may_pull(skb, sizeof(struct ipv6hdr) + ((skb->h.raw[1] + 1) << 3))) {
		kfree_skb(skb);
		return -1;
	}

	opt->hop = sizeof(struct ipv6hdr);
	if (ip6_parse_tlv(tlvprochopopt_lst, skbp)) {
		skb = *skbp;
		skb->h.raw += (skb->h.raw[1]+1)<<3;
		opt = IP6CB(skb);
		opt->nhoff = sizeof(struct ipv6hdr);
		return 1;
	}
	return -1;
}

/*
 * Creating outbound headers.
 *
 * "build" functions work when skb is filled from head to tail (datagram)
 * "push"  functions work when headers are added from tail to head (tcp)
 *
 * In both cases we assume that the caller has reserved enough room
 * for the headers.
 */
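/*
 * Editor's note (illustrative, not part of the original source): because
 * each "push" helper prepends with skb_push() and then rewrites *proto,
 * headers are emitted innermost-first.  ipv6_push_nfrag_opts() below
 * pushes the routing header, then the dest0 options, then hop-by-hop, so
 * on the wire the order becomes hop-by-hop, dest0, routing header, which
 * matches the header ordering recommended by RFC 2460.
 */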

static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
			    struct ipv6_rt_hdr *opt,
			    struct in6_addr **addr_p)
{
	struct rt0_hdr *phdr, *ihdr;
	int hops;

	ihdr = (struct rt0_hdr *) opt;

	phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
	memcpy(phdr, ihdr, sizeof(struct rt0_hdr));

	hops = ihdr->rt_hdr.hdrlen >> 1;

	if (hops > 1)
		memcpy(phdr->addr, ihdr->addr + 1,
		       (hops - 1) * sizeof(struct in6_addr));

	ipv6_addr_copy(phdr->addr + (hops - 1), *addr_p);
	*addr_p = ihdr->addr;

	phdr->rt_hdr.nexthdr = *proto;
	*proto = NEXTHDR_ROUTING;
}
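/*
 * Editor's note (illustrative, not part of the original source): for a
 * source route [A1, A2, A3] with final destination D, the code above
 * writes [A2, A3, D] into the pushed header and returns A1 through
 * *addr_p, so the IPv6 header's daddr becomes the first hop while the
 * real destination travels in the last routing-header slot.
 */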

static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
{
	struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt));

	memcpy(h, opt, ipv6_optlen(opt));
	h->nexthdr = *proto;
	*proto = type;
}

void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
			  u8 *proto,
			  struct in6_addr **daddr)
{
	if (opt->srcrt) {
		ipv6_push_rthdr(skb, proto, opt->srcrt, daddr);
		/*
		 * IPV6_RTHDRDSTOPTS is ignored
		 * unless IPV6_RTHDR is set (RFC3542).
		 */
		if (opt->dst0opt)
			ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
	}
	if (opt->hopopt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
}

EXPORT_SYMBOL(ipv6_push_nfrag_opts);

void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
{
	if (opt->dst1opt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
}

struct ipv6_txoptions *
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
{
	struct ipv6_txoptions *opt2;

	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
	if (opt2) {
		long dif = (char*)opt2 - (char*)opt;
		memcpy(opt2, opt, opt->tot_len);
		if (opt2->hopopt)
			*((char**)&opt2->hopopt) += dif;
		if (opt2->dst0opt)
			*((char**)&opt2->dst0opt) += dif;
		if (opt2->dst1opt)
			*((char**)&opt2->dst1opt) += dif;
		if (opt2->srcrt)
			*((char**)&opt2->srcrt) += dif;
	}
	return opt2;
}
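/*
 * Editor's note (illustrative, not part of the original source): the copy
 * above is a single flat memcpy of tot_len bytes, so the embedded hopopt,
 * dst0opt, dst1opt and srcrt pointers still point into the old block;
 * adding the byte difference "dif" between the two allocations rebases
 * them onto the new one.
 */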

EXPORT_SYMBOL_GPL(ipv6_dup_options);

static int ipv6_renew_option(void *ohdr,
			     struct ipv6_opt_hdr __user *newopt, int newoptlen,
			     int inherit,
			     struct ipv6_opt_hdr **hdr,
			     char **p)
{
	if (inherit) {
		if (ohdr) {
			memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
			*hdr = (struct ipv6_opt_hdr *)*p;
			*p += CMSG_ALIGN(ipv6_optlen(*(struct ipv6_opt_hdr **)hdr));
		}
	} else {
		if (newopt) {
			if (copy_from_user(*p, newopt, newoptlen))
				return -EFAULT;
			*hdr = (struct ipv6_opt_hdr *)*p;
			if (ipv6_optlen(*(struct ipv6_opt_hdr **)hdr) > newoptlen)
				return -EINVAL;
			*p += CMSG_ALIGN(newoptlen);
		}
	}
	return 0;
}

struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
		   int newtype,
		   struct ipv6_opt_hdr __user *newopt, int newoptlen)
{
	int tot_len = 0;
	char *p;
	struct ipv6_txoptions *opt2;
	int err;

	if (opt) {
		if (newtype != IPV6_HOPOPTS && opt->hopopt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
		if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
		if (newtype != IPV6_RTHDR && opt->srcrt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
		if (newtype != IPV6_DSTOPTS && opt->dst1opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
	}

	if (newopt && newoptlen)
		tot_len += CMSG_ALIGN(newoptlen);

	if (!tot_len)
		return NULL;

	tot_len += sizeof(*opt2);
	opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
	if (!opt2)
		return ERR_PTR(-ENOBUFS);

	memset(opt2, 0, tot_len);

	opt2->tot_len = tot_len;
	p = (char *)(opt2 + 1);

	err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
				newtype != IPV6_HOPOPTS,
				&opt2->hopopt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDRDSTOPTS,
				&opt2->dst0opt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDR,
				(struct ipv6_opt_hdr **)&opt2->srcrt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
				newtype != IPV6_DSTOPTS,
				&opt2->dst1opt, &p);
	if (err)
		goto out;

	opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
			  (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
			  (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
	opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);

	return opt2;
out:
	sock_kfree_s(sk, opt2, opt2->tot_len);
	return ERR_PTR(err);
}
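/*
 * Editor's note (illustrative, not part of the original source):
 * opt_nflen counts the headers belonging to the non-fragmentable part of
 * an outgoing packet (hop-by-hop, dest0 and the routing header), while
 * opt_flen counts the fragmentable part (dest1); the output path (e.g.
 * ip6_append_data()) uses the two totals when sizing and laying out the
 * packet.
 */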

struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
					  struct ipv6_txoptions *opt)
{
	/*
	 * ignore the dest before srcrt unless srcrt is being included.
	 * --yoshfuji
	 */
	if (opt && opt->dst0opt && !opt->srcrt) {
		if (opt_space != opt) {
			memcpy(opt_space, opt, sizeof(*opt_space));
			opt = opt_space;
		}
		opt->opt_nflen -= ipv6_optlen(opt->dst0opt);
		opt->dst0opt = NULL;
	}

	return opt;
}
