net/ipv6/ip6_tunnel.c
1 /*
2 * IPv6 tunneling device
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Ville Nuorvala <vnuorval@tcs.hut.fi>
7 * Yasuyuki Kozakai <kozakai@linux-ipv6.org>
8 *
9 * Based on:
10 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
11 *
12 * RFC 2473
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21 #include <linux/module.h>
22 #include <linux/capability.h>
23 #include <linux/errno.h>
24 #include <linux/types.h>
25 #include <linux/sockios.h>
26 #include <linux/icmp.h>
27 #include <linux/if.h>
28 #include <linux/in.h>
29 #include <linux/ip.h>
30 #include <linux/if_tunnel.h>
31 #include <linux/net.h>
32 #include <linux/in6.h>
33 #include <linux/netdevice.h>
34 #include <linux/if_arp.h>
35 #include <linux/icmpv6.h>
36 #include <linux/init.h>
37 #include <linux/route.h>
38 #include <linux/rtnetlink.h>
39 #include <linux/netfilter_ipv6.h>
40 #include <linux/slab.h>
41
42 #include <asm/uaccess.h>
43 #include <linux/atomic.h>
44
45 #include <net/icmp.h>
46 #include <net/ip.h>
47 #include <net/ipv6.h>
48 #include <net/ip6_route.h>
49 #include <net/addrconf.h>
50 #include <net/ip6_tunnel.h>
51 #include <net/xfrm.h>
52 #include <net/dsfield.h>
53 #include <net/inet_ecn.h>
54 #include <net/net_namespace.h>
55 #include <net/netns/generic.h>
56
57 MODULE_AUTHOR("Ville Nuorvala");
58 MODULE_DESCRIPTION("IPv6 tunneling device");
59 MODULE_LICENSE("GPL");
60 MODULE_ALIAS_NETDEV("ip6tnl0");
61
62 #ifdef IP6_TNL_DEBUG
63 #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
64 #else
65 #define IP6_TNL_TRACE(x...) do {;} while(0)
66 #endif
67
68 #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
69 #define IPV6_TCLASS_SHIFT 20
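/*
 * In the first 32-bit word of the IPv6 header the layout is
 * 4 bits version | 8 bits traffic class | 20 bits flow label, so
 * IPV6_TCLASS_MASK (flow info minus the flow label) selects the traffic
 * class bits and IPV6_TCLASS_SHIFT moves a host-order TOS value into
 * place, e.g. in ip4ip6_tnl_xmit() below:
 *
 *	fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
 *				& IPV6_TCLASS_MASK;
 */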
70
71 #define HASH_SIZE 32
72
73 #define HASH(addr) ((__force u32)((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \
74 (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
75 (HASH_SIZE - 1))
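/*
 * The hash simply XORs the four 32-bit words of an address and folds the
 * result into one of the HASH_SIZE (32) buckets; ip6_tnl_bucket() below
 * combines HASH(remote) ^ HASH(local), so a tunnel's bucket depends on
 * both of its end-point addresses.
 */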
76
77 static int ip6_tnl_dev_init(struct net_device *dev);
78 static void ip6_tnl_dev_setup(struct net_device *dev);
79
80 static int ip6_tnl_net_id __read_mostly;
81 struct ip6_tnl_net {
82 /* the IPv6 tunnel fallback device */
83 struct net_device *fb_tnl_dev;
84 /* lists for storing tunnels in use */
85 struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
86 struct ip6_tnl __rcu *tnls_wc[1];
87 struct ip6_tnl __rcu **tnls[2];
88 };
89
90 /* often modified stats are per cpu, other are shared (netdev->stats) */
91 struct pcpu_tstats {
92 unsigned long rx_packets;
93 unsigned long rx_bytes;
94 unsigned long tx_packets;
95 unsigned long tx_bytes;
96 };
97
98 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
99 {
100 struct pcpu_tstats sum = { 0 };
101 int i;
102
103 for_each_possible_cpu(i) {
104 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
105
106 sum.rx_packets += tstats->rx_packets;
107 sum.rx_bytes += tstats->rx_bytes;
108 sum.tx_packets += tstats->tx_packets;
109 sum.tx_bytes += tstats->tx_bytes;
110 }
111 dev->stats.rx_packets = sum.rx_packets;
112 dev->stats.rx_bytes = sum.rx_bytes;
113 dev->stats.tx_packets = sum.tx_packets;
114 dev->stats.tx_bytes = sum.tx_bytes;
115 return &dev->stats;
116 }
117
118 /*
119  * Locking: hash tables are protected by RCU and RTNL
120 */
121
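/*
 * Per-tunnel destination cache: ip6_tnl_dst_store() remembers the route
 * used for the last transmission together with the fib6 node serial
 * number (dst_cookie), and ip6_tnl_dst_check() drops the cached entry
 * once routing has changed, so ip6_tnl_xmit2() can reuse the cached
 * route (when no fwmark is in use) instead of doing a full lookup for
 * every packet.
 */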
122 static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
123 {
124 struct dst_entry *dst = t->dst_cache;
125
126 if (dst && dst->obsolete &&
127 dst->ops->check(dst, t->dst_cookie) == NULL) {
128 t->dst_cache = NULL;
129 dst_release(dst);
130 return NULL;
131 }
132
133 return dst;
134 }
135
136 static inline void ip6_tnl_dst_reset(struct ip6_tnl *t)
137 {
138 dst_release(t->dst_cache);
139 t->dst_cache = NULL;
140 }
141
142 static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
143 {
144 struct rt6_info *rt = (struct rt6_info *) dst;
145 t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
146 dst_release(t->dst_cache);
147 t->dst_cache = dst;
148 }
149
150 /**
151 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
152 * @remote: the address of the tunnel exit-point
153 * @local: the address of the tunnel entry-point
154 *
155 * Return:
156 * tunnel matching given end-points if found,
157 * else fallback tunnel if its device is up,
158 * else %NULL
159 **/
160
161 #define for_each_ip6_tunnel_rcu(start) \
162 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
163
164 static struct ip6_tnl *
165 ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
166 {
167 unsigned int h0 = HASH(remote);
168 unsigned int h1 = HASH(local);
169 struct ip6_tnl *t;
170 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
171
172 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) {
173 if (ipv6_addr_equal(local, &t->parms.laddr) &&
174 ipv6_addr_equal(remote, &t->parms.raddr) &&
175 (t->dev->flags & IFF_UP))
176 return t;
177 }
178 t = rcu_dereference(ip6n->tnls_wc[0]);
179 if (t && (t->dev->flags & IFF_UP))
180 return t;
181
182 return NULL;
183 }
184
185 /**
186 * ip6_tnl_bucket - get head of list matching given tunnel parameters
187 * @p: parameters containing tunnel end-points
188 *
189 * Description:
190 * ip6_tnl_bucket() returns the head of the list matching the
191 * &struct in6_addr entries laddr and raddr in @p.
192 *
193 * Return: head of IPv6 tunnel list
194 **/
195
196 static struct ip6_tnl __rcu **
197 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
198 {
199 const struct in6_addr *remote = &p->raddr;
200 const struct in6_addr *local = &p->laddr;
201 unsigned h = 0;
202 int prio = 0;
203
204 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
205 prio = 1;
206 h = HASH(remote) ^ HASH(local);
207 }
208 return &ip6n->tnls[prio][h];
209 }
210
211 /**
212 * ip6_tnl_link - add tunnel to hash table
213 * @t: tunnel to be added
214 **/
215
216 static void
217 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
218 {
219 struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
220
221 	RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
222 RCU_INIT_POINTER(*tp, t);
223 }
224
225 /**
226 * ip6_tnl_unlink - remove tunnel from hash table
227 * @t: tunnel to be removed
228 **/
229
230 static void
231 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
232 {
233 struct ip6_tnl __rcu **tp;
234 struct ip6_tnl *iter;
235
236 for (tp = ip6_tnl_bucket(ip6n, &t->parms);
237 (iter = rtnl_dereference(*tp)) != NULL;
238 tp = &iter->next) {
239 if (t == iter) {
240 RCU_INIT_POINTER(*tp, t->next);
241 break;
242 }
243 }
244 }
245
246 static void ip6_dev_free(struct net_device *dev)
247 {
248 free_percpu(dev->tstats);
249 free_netdev(dev);
250 }
251
252 /**
253 * ip6_tnl_create() - create a new tunnel
254  *   @net: network namespace in which the tunnel is created
255  *   @p: tunnel parameters
256 *
257 * Description:
258 * Create tunnel matching given parameters.
259 *
260 * Return:
261 * created tunnel or NULL
262 **/
263
264 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
265 {
266 struct net_device *dev;
267 struct ip6_tnl *t;
268 char name[IFNAMSIZ];
269 int err;
270 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
271
272 if (p->name[0])
273 strlcpy(name, p->name, IFNAMSIZ);
274 else
275 sprintf(name, "ip6tnl%%d");
276
277 dev = alloc_netdev(sizeof (*t), name, ip6_tnl_dev_setup);
278 if (dev == NULL)
279 goto failed;
280
281 dev_net_set(dev, net);
282
283 t = netdev_priv(dev);
284 t->parms = *p;
285 err = ip6_tnl_dev_init(dev);
286 if (err < 0)
287 goto failed_free;
288
289 if ((err = register_netdevice(dev)) < 0)
290 goto failed_free;
291
292 dev_hold(dev);
293 ip6_tnl_link(ip6n, t);
294 return t;
295
296 failed_free:
297 ip6_dev_free(dev);
298 failed:
299 return NULL;
300 }
301
302 /**
303 * ip6_tnl_locate - find or create tunnel matching given parameters
304 * @p: tunnel parameters
305  *   @create: != 0 if allowed to create a new tunnel when no match is found
306 *
307 * Description:
308 * ip6_tnl_locate() first tries to locate an existing tunnel
309  *   based on @p. If this is unsuccessful, but @create is set, a new
310 * tunnel device is created and registered for use.
311 *
312 * Return:
313 * matching tunnel or NULL
314 **/
315
316 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
317 struct ip6_tnl_parm *p, int create)
318 {
319 const struct in6_addr *remote = &p->raddr;
320 const struct in6_addr *local = &p->laddr;
321 struct ip6_tnl __rcu **tp;
322 struct ip6_tnl *t;
323 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
324
325 for (tp = ip6_tnl_bucket(ip6n, p);
326 (t = rtnl_dereference(*tp)) != NULL;
327 tp = &t->next) {
328 if (ipv6_addr_equal(local, &t->parms.laddr) &&
329 ipv6_addr_equal(remote, &t->parms.raddr))
330 return t;
331 }
332 if (!create)
333 return NULL;
334 return ip6_tnl_create(net, p);
335 }
336
337 /**
338 * ip6_tnl_dev_uninit - tunnel device uninitializer
339 * @dev: the device to be destroyed
340 *
341 * Description:
342 * ip6_tnl_dev_uninit() removes tunnel from its list
343 **/
344
345 static void
346 ip6_tnl_dev_uninit(struct net_device *dev)
347 {
348 struct ip6_tnl *t = netdev_priv(dev);
349 struct net *net = dev_net(dev);
350 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
351
352 if (dev == ip6n->fb_tnl_dev)
353 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
354 else
355 ip6_tnl_unlink(ip6n, t);
356 ip6_tnl_dst_reset(t);
357 dev_put(dev);
358 }
359
360 /**
361  * parse_tlv_tnl_enc_lim - handle encapsulation limit option
362 * @skb: received socket buffer
363 *
364 * Return:
365 * 0 if none was found,
366 * else index to encapsulation limit
367 **/
368
369 static __u16
370 parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
371 {
372 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
373 __u8 nexthdr = ipv6h->nexthdr;
374 __u16 off = sizeof (*ipv6h);
375
376 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
377 __u16 optlen = 0;
378 struct ipv6_opt_hdr *hdr;
379 if (raw + off + sizeof (*hdr) > skb->data &&
380 !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
381 break;
382
383 hdr = (struct ipv6_opt_hdr *) (raw + off);
384 if (nexthdr == NEXTHDR_FRAGMENT) {
385 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
386 if (frag_hdr->frag_off)
387 break;
388 optlen = 8;
389 } else if (nexthdr == NEXTHDR_AUTH) {
390 optlen = (hdr->hdrlen + 2) << 2;
391 } else {
392 optlen = ipv6_optlen(hdr);
393 }
394 if (nexthdr == NEXTHDR_DEST) {
395 __u16 i = off + 2;
396 while (1) {
397 struct ipv6_tlv_tnl_enc_lim *tel;
398
399 /* No more room for encapsulation limit */
400 if (i + sizeof (*tel) > off + optlen)
401 break;
402
403 tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
404 /* return index of option if found and valid */
405 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
406 tel->length == 1)
407 return i;
408 /* else jump to next option */
409 if (tel->type)
410 i += tel->length + 2;
411 else
412 i++;
413 }
414 }
415 nexthdr = hdr->nexthdr;
416 off += optlen;
417 }
418 return 0;
419 }
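/*
 * The option parse_tlv_tnl_enc_lim() is looking for is the RFC 2473
 * Tunnel Encapsulation Limit TLV carried in a destination options
 * header.  The smallest such header is 8 octets:
 *
 *	next header, hdr ext len = 0,
 *	IPV6_TLV_TNL_ENCAP_LIMIT (4), length = 1, limit value,
 *	IPV6_TLV_PADN (1), pad length = 1, one pad octet
 *
 * On success the function returns the offset of the option's type byte
 * from the start of @raw.
 */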
420
421 /**
422 * ip6_tnl_err - tunnel error handler
423 *
424 * Description:
425 * ip6_tnl_err() should handle errors in the tunnel according
426 * to the specifications in RFC 2473.
427 **/
428
429 static int
430 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
431 u8 *type, u8 *code, int *msg, __u32 *info, int offset)
432 {
433 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
434 struct ip6_tnl *t;
435 int rel_msg = 0;
436 u8 rel_type = ICMPV6_DEST_UNREACH;
437 u8 rel_code = ICMPV6_ADDR_UNREACH;
438 __u32 rel_info = 0;
439 __u16 len;
440 int err = -ENOENT;
441
442 /* If the packet doesn't contain the original IPv6 header we are
443 in trouble since we might need the source address for further
444 processing of the error. */
445
446 rcu_read_lock();
447 if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr,
448 &ipv6h->saddr)) == NULL)
449 goto out;
450
451 if (t->parms.proto != ipproto && t->parms.proto != 0)
452 goto out;
453
454 err = 0;
455
456 switch (*type) {
457 __u32 teli;
458 struct ipv6_tlv_tnl_enc_lim *tel;
459 __u32 mtu;
460 case ICMPV6_DEST_UNREACH:
461 if (net_ratelimit())
462 printk(KERN_WARNING
463 "%s: Path to destination invalid "
464 "or inactive!\n", t->parms.name);
465 rel_msg = 1;
466 break;
467 case ICMPV6_TIME_EXCEED:
468 if ((*code) == ICMPV6_EXC_HOPLIMIT) {
469 if (net_ratelimit())
470 printk(KERN_WARNING
471 "%s: Too small hop limit or "
472 "routing loop in tunnel!\n",
473 t->parms.name);
474 rel_msg = 1;
475 }
476 break;
477 case ICMPV6_PARAMPROB:
478 teli = 0;
479 if ((*code) == ICMPV6_HDR_FIELD)
480 teli = parse_tlv_tnl_enc_lim(skb, skb->data);
481
482 if (teli && teli == *info - 2) {
483 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
484 if (tel->encap_limit == 0) {
485 if (net_ratelimit())
486 printk(KERN_WARNING
487 "%s: Too small encapsulation "
488 "limit or routing loop in "
489 "tunnel!\n", t->parms.name);
490 rel_msg = 1;
491 }
492 } else if (net_ratelimit()) {
493 printk(KERN_WARNING
494 "%s: Recipient unable to parse tunneled "
495 "packet!\n ", t->parms.name);
496 }
497 break;
498 case ICMPV6_PKT_TOOBIG:
499 mtu = *info - offset;
500 if (mtu < IPV6_MIN_MTU)
501 mtu = IPV6_MIN_MTU;
502 t->dev->mtu = mtu;
503
504 if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) {
505 rel_type = ICMPV6_PKT_TOOBIG;
506 rel_code = 0;
507 rel_info = mtu;
508 rel_msg = 1;
509 }
510 break;
511 }
512
513 *type = rel_type;
514 *code = rel_code;
515 *info = rel_info;
516 *msg = rel_msg;
517
518 out:
519 rcu_read_unlock();
520 return err;
521 }
522
523 static int
524 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
525 u8 type, u8 code, int offset, __be32 info)
526 {
527 int rel_msg = 0;
528 u8 rel_type = type;
529 u8 rel_code = code;
530 __u32 rel_info = ntohl(info);
531 int err;
532 struct sk_buff *skb2;
533 const struct iphdr *eiph;
534 struct rtable *rt;
535 struct flowi4 fl4;
536
537 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
538 &rel_msg, &rel_info, offset);
539 if (err < 0)
540 return err;
541
542 if (rel_msg == 0)
543 return 0;
544
545 switch (rel_type) {
546 case ICMPV6_DEST_UNREACH:
547 if (rel_code != ICMPV6_ADDR_UNREACH)
548 return 0;
549 rel_type = ICMP_DEST_UNREACH;
550 rel_code = ICMP_HOST_UNREACH;
551 break;
552 case ICMPV6_PKT_TOOBIG:
553 if (rel_code != 0)
554 return 0;
555 rel_type = ICMP_DEST_UNREACH;
556 rel_code = ICMP_FRAG_NEEDED;
557 break;
558 default:
559 return 0;
560 }
561
562 if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
563 return 0;
564
565 skb2 = skb_clone(skb, GFP_ATOMIC);
566 if (!skb2)
567 return 0;
568
569 skb_dst_drop(skb2);
570
571 skb_pull(skb2, offset);
572 skb_reset_network_header(skb2);
573 eiph = ip_hdr(skb2);
574
575 /* Try to guess incoming interface */
576 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
577 eiph->saddr, 0,
578 0, 0,
579 IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
580 if (IS_ERR(rt))
581 goto out;
582
583 skb2->dev = rt->dst.dev;
584
585 /* route "incoming" packet */
586 if (rt->rt_flags & RTCF_LOCAL) {
587 ip_rt_put(rt);
588 rt = NULL;
589 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
590 eiph->daddr, eiph->saddr,
591 0, 0,
592 IPPROTO_IPIP,
593 RT_TOS(eiph->tos), 0);
594 if (IS_ERR(rt) ||
595 rt->dst.dev->type != ARPHRD_TUNNEL) {
596 if (!IS_ERR(rt))
597 ip_rt_put(rt);
598 goto out;
599 }
600 skb_dst_set(skb2, &rt->dst);
601 } else {
602 ip_rt_put(rt);
603 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
604 skb2->dev) ||
605 skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
606 goto out;
607 }
608
609 /* change mtu on this route */
610 if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
611 if (rel_info > dst_mtu(skb_dst(skb2)))
612 goto out;
613
614 skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), rel_info);
615 }
616
617 icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
618
619 out:
620 kfree_skb(skb2);
621 return 0;
622 }
623
624 static int
625 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
626 u8 type, u8 code, int offset, __be32 info)
627 {
628 int rel_msg = 0;
629 u8 rel_type = type;
630 u8 rel_code = code;
631 __u32 rel_info = ntohl(info);
632 int err;
633
634 err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
635 &rel_msg, &rel_info, offset);
636 if (err < 0)
637 return err;
638
639 if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
640 struct rt6_info *rt;
641 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
642
643 if (!skb2)
644 return 0;
645
646 skb_dst_drop(skb2);
647 skb_pull(skb2, offset);
648 skb_reset_network_header(skb2);
649
650 /* Try to guess incoming interface */
651 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
652 NULL, 0, 0);
653
654 if (rt && rt->rt6i_dev)
655 skb2->dev = rt->rt6i_dev;
656
657 icmpv6_send(skb2, rel_type, rel_code, rel_info);
658
659 if (rt)
660 dst_release(&rt->dst);
661
662 kfree_skb(skb2);
663 }
664
665 return 0;
666 }
667
668 static void ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
669 const struct ipv6hdr *ipv6h,
670 struct sk_buff *skb)
671 {
672 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
673
674 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
675 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
676
677 if (INET_ECN_is_ce(dsfield))
678 IP_ECN_set_ce(ip_hdr(skb));
679 }
680
681 static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
682 const struct ipv6hdr *ipv6h,
683 struct sk_buff *skb)
684 {
685 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
686 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
687
688 if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
689 IP6_ECN_set_ce(ipv6_hdr(skb));
690 }
691
692 /* called with rcu_read_lock() */
693 static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
694 {
695 struct ip6_tnl_parm *p = &t->parms;
696 int ret = 0;
697 struct net *net = dev_net(t->dev);
698
699 if (p->flags & IP6_TNL_F_CAP_RCV) {
700 struct net_device *ldev = NULL;
701
702 if (p->link)
703 ldev = dev_get_by_index_rcu(net, p->link);
704
705 if ((ipv6_addr_is_multicast(&p->laddr) ||
706 likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) &&
707 likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0)))
708 ret = 1;
709
710 }
711 return ret;
712 }
713
714 /**
715 * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
716 * @skb: received socket buffer
717 * @protocol: ethernet protocol ID
718 * @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN
719 *
720 * Return: 0
721 **/
722
723 static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
724 __u8 ipproto,
725 void (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
726 const struct ipv6hdr *ipv6h,
727 struct sk_buff *skb))
728 {
729 struct ip6_tnl *t;
730 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
731
732 rcu_read_lock();
733
734 if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
735 &ipv6h->daddr)) != NULL) {
736 struct pcpu_tstats *tstats;
737
738 if (t->parms.proto != ipproto && t->parms.proto != 0) {
739 rcu_read_unlock();
740 goto discard;
741 }
742
743 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
744 rcu_read_unlock();
745 goto discard;
746 }
747
748 if (!ip6_tnl_rcv_ctl(t)) {
749 t->dev->stats.rx_dropped++;
750 rcu_read_unlock();
751 goto discard;
752 }
753 secpath_reset(skb);
754 skb->mac_header = skb->network_header;
755 skb_reset_network_header(skb);
756 skb->protocol = htons(protocol);
757 skb->pkt_type = PACKET_HOST;
758 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
759
760 tstats = this_cpu_ptr(t->dev->tstats);
761 tstats->rx_packets++;
762 tstats->rx_bytes += skb->len;
763
764 __skb_tunnel_rx(skb, t->dev);
765
766 dscp_ecn_decapsulate(t, ipv6h, skb);
767
768 netif_rx(skb);
769
770 rcu_read_unlock();
771 return 0;
772 }
773 rcu_read_unlock();
774 return 1;
775
776 discard:
777 kfree_skb(skb);
778 return 0;
779 }
780
781 static int ip4ip6_rcv(struct sk_buff *skb)
782 {
783 return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP,
784 ip4ip6_dscp_ecn_decapsulate);
785 }
786
787 static int ip6ip6_rcv(struct sk_buff *skb)
788 {
789 return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6,
790 ip6ip6_dscp_ecn_decapsulate);
791 }
792
793 struct ipv6_tel_txoption {
794 struct ipv6_txoptions ops;
795 __u8 dst_opt[8];
796 };
797
798 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
799 {
800 memset(opt, 0, sizeof(struct ipv6_tel_txoption));
801
802 opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
803 opt->dst_opt[3] = 1;
804 opt->dst_opt[4] = encap_limit;
805 opt->dst_opt[5] = IPV6_TLV_PADN;
806 opt->dst_opt[6] = 1;
807
808 opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
809 opt->ops.opt_nflen = 8;
810 }
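/*
 * The destination options header built above matches the layout shown at
 * parse_tlv_tnl_enc_lim(): dst_opt[0] (next header) is filled in when the
 * header is pushed by ipv6_push_nfrag_opts(), dst_opt[1] = 0 means a total
 * length of 8 octets, bytes 2-4 carry the encapsulation limit TLV and
 * bytes 5-7 a PadN option.
 */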
811
812 /**
813 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
814 * @t: the outgoing tunnel device
815 * @hdr: IPv6 header from the incoming packet
816 *
817 * Description:
818 * Avoid trivial tunneling loop by checking that tunnel exit-point
819 * doesn't match source of incoming packet.
820 *
821 * Return:
822 * 1 if conflict,
823 * 0 else
824 **/
825
826 static inline int
827 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
828 {
829 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
830 }
831
832 static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
833 {
834 struct ip6_tnl_parm *p = &t->parms;
835 int ret = 0;
836 struct net *net = dev_net(t->dev);
837
838 if (p->flags & IP6_TNL_F_CAP_XMIT) {
839 struct net_device *ldev = NULL;
840
841 rcu_read_lock();
842 if (p->link)
843 ldev = dev_get_by_index_rcu(net, p->link);
844
845 if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
846 printk(KERN_WARNING
847 "%s xmit: Local address not yet configured!\n",
848 p->name);
849 else if (!ipv6_addr_is_multicast(&p->raddr) &&
850 unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0)))
851 printk(KERN_WARNING
852 "%s xmit: Routing loop! "
853 "Remote address found on this node!\n",
854 p->name);
855 else
856 ret = 1;
857 rcu_read_unlock();
858 }
859 return ret;
860 }
861 /**
862 * ip6_tnl_xmit2 - encapsulate packet and send
863 * @skb: the outgoing socket buffer
864 * @dev: the outgoing tunnel device
865 * @dsfield: dscp code for outer header
866  *   @fl6: flow of the tunneled packet
867 * @encap_limit: encapsulation limit
868 * @pmtu: Path MTU is stored if packet is too big
869 *
870 * Description:
871 * Build new header and do some sanity checks on the packet before sending
872 * it.
873 *
874 * Return:
875 * 0 on success
876  *   -1 on failure
877  *   %-EMSGSIZE if the message is too big (the path MTU is returned in @pmtu)
878 **/
879
880 static int ip6_tnl_xmit2(struct sk_buff *skb,
881 struct net_device *dev,
882 __u8 dsfield,
883 struct flowi6 *fl6,
884 int encap_limit,
885 __u32 *pmtu)
886 {
887 struct net *net = dev_net(dev);
888 struct ip6_tnl *t = netdev_priv(dev);
889 struct net_device_stats *stats = &t->dev->stats;
890 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
891 struct ipv6_tel_txoption opt;
892 struct dst_entry *dst = NULL, *ndst = NULL;
893 struct net_device *tdev;
894 int mtu;
895 unsigned int max_headroom = sizeof(struct ipv6hdr);
896 u8 proto;
897 int err = -1;
898 int pkt_len;
899
900 if (!fl6->flowi6_mark)
901 dst = ip6_tnl_dst_check(t);
902 if (!dst) {
903 ndst = ip6_route_output(net, NULL, fl6);
904
905 if (ndst->error)
906 goto tx_err_link_failure;
907 ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
908 if (IS_ERR(ndst)) {
909 err = PTR_ERR(ndst);
910 ndst = NULL;
911 goto tx_err_link_failure;
912 }
913 dst = ndst;
914 }
915
916 tdev = dst->dev;
917
918 if (tdev == dev) {
919 stats->collisions++;
920 if (net_ratelimit())
921 printk(KERN_WARNING
922 "%s: Local routing loop detected!\n",
923 t->parms.name);
924 goto tx_err_dst_release;
925 }
926 mtu = dst_mtu(dst) - sizeof (*ipv6h);
927 if (encap_limit >= 0) {
928 max_headroom += 8;
929 mtu -= 8;
930 }
931 if (mtu < IPV6_MIN_MTU)
932 mtu = IPV6_MIN_MTU;
933 if (skb_dst(skb))
934 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
935 if (skb->len > mtu) {
936 *pmtu = mtu;
937 err = -EMSGSIZE;
938 goto tx_err_dst_release;
939 }
940
941 /*
942 * Okay, now see if we can stuff it in the buffer as-is.
943 */
944 max_headroom += LL_RESERVED_SPACE(tdev);
945
946 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
947 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
948 struct sk_buff *new_skb;
949
950 if (!(new_skb = skb_realloc_headroom(skb, max_headroom)))
951 goto tx_err_dst_release;
952
953 if (skb->sk)
954 skb_set_owner_w(new_skb, skb->sk);
955 kfree_skb(skb);
956 skb = new_skb;
957 }
958 skb_dst_drop(skb);
959 if (fl6->flowi6_mark) {
960 skb_dst_set(skb, dst);
961 ndst = NULL;
962 } else {
963 skb_dst_set_noref(skb, dst);
964 }
965 skb->transport_header = skb->network_header;
966
967 proto = fl6->flowi6_proto;
968 if (encap_limit >= 0) {
969 init_tel_txopt(&opt, encap_limit);
970 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
971 }
972 skb_push(skb, sizeof(struct ipv6hdr));
973 skb_reset_network_header(skb);
974 ipv6h = ipv6_hdr(skb);
975 *(__be32*)ipv6h = fl6->flowlabel | htonl(0x60000000);
976 dsfield = INET_ECN_encapsulate(0, dsfield);
977 ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
978 ipv6h->hop_limit = t->parms.hop_limit;
979 ipv6h->nexthdr = proto;
980 ipv6_addr_copy(&ipv6h->saddr, &fl6->saddr);
981 ipv6_addr_copy(&ipv6h->daddr, &fl6->daddr);
982 nf_reset(skb);
983 pkt_len = skb->len;
984 err = ip6_local_out(skb);
985
986 if (net_xmit_eval(err) == 0) {
987 struct pcpu_tstats *tstats = this_cpu_ptr(t->dev->tstats);
988
989 tstats->tx_bytes += pkt_len;
990 tstats->tx_packets++;
991 } else {
992 stats->tx_errors++;
993 stats->tx_aborted_errors++;
994 }
995 if (ndst)
996 ip6_tnl_dst_store(t, ndst);
997 return 0;
998 tx_err_link_failure:
999 stats->tx_carrier_errors++;
1000 dst_link_failure(skb);
1001 tx_err_dst_release:
1002 dst_release(ndst);
1003 return err;
1004 }
1005
1006 static inline int
1007 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1008 {
1009 struct ip6_tnl *t = netdev_priv(dev);
1010 const struct iphdr *iph = ip_hdr(skb);
1011 int encap_limit = -1;
1012 struct flowi6 fl6;
1013 __u8 dsfield;
1014 __u32 mtu;
1015 int err;
1016
1017 if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) ||
1018 !ip6_tnl_xmit_ctl(t))
1019 return -1;
1020
1021 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1022 encap_limit = t->parms.encap_limit;
1023
1024 memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
1025 fl6.flowi6_proto = IPPROTO_IPIP;
1026
1027 dsfield = ipv4_get_dsfield(iph);
1028
1029 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1030 fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
1031 & IPV6_TCLASS_MASK;
1032 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1033 fl6.flowi6_mark = skb->mark;
1034
1035 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
1036 if (err != 0) {
1037 /* XXX: send ICMP error even if DF is not set. */
1038 if (err == -EMSGSIZE)
1039 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1040 htonl(mtu));
1041 return -1;
1042 }
1043
1044 return 0;
1045 }
1046
1047 static inline int
1048 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1049 {
1050 struct ip6_tnl *t = netdev_priv(dev);
1051 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1052 int encap_limit = -1;
1053 __u16 offset;
1054 struct flowi6 fl6;
1055 __u8 dsfield;
1056 __u32 mtu;
1057 int err;
1058
1059 if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
1060 !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
1061 return -1;
1062
1063 offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb));
1064 if (offset > 0) {
1065 struct ipv6_tlv_tnl_enc_lim *tel;
1066 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
1067 if (tel->encap_limit == 0) {
1068 icmpv6_send(skb, ICMPV6_PARAMPROB,
1069 ICMPV6_HDR_FIELD, offset + 2);
1070 return -1;
1071 }
1072 encap_limit = tel->encap_limit - 1;
1073 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1074 encap_limit = t->parms.encap_limit;
1075
1076 memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
1077 fl6.flowi6_proto = IPPROTO_IPV6;
1078
1079 dsfield = ipv6_get_dsfield(ipv6h);
1080 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1081 fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
1082 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1083 fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
1084 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1085 fl6.flowi6_mark = skb->mark;
1086
1087 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
1088 if (err != 0) {
1089 if (err == -EMSGSIZE)
1090 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1091 return -1;
1092 }
1093
1094 return 0;
1095 }
1096
1097 static netdev_tx_t
1098 ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1099 {
1100 struct ip6_tnl *t = netdev_priv(dev);
1101 struct net_device_stats *stats = &t->dev->stats;
1102 int ret;
1103
1104 switch (skb->protocol) {
1105 case htons(ETH_P_IP):
1106 ret = ip4ip6_tnl_xmit(skb, dev);
1107 break;
1108 case htons(ETH_P_IPV6):
1109 ret = ip6ip6_tnl_xmit(skb, dev);
1110 break;
1111 default:
1112 goto tx_err;
1113 }
1114
1115 if (ret < 0)
1116 goto tx_err;
1117
1118 return NETDEV_TX_OK;
1119
1120 tx_err:
1121 stats->tx_errors++;
1122 stats->tx_dropped++;
1123 kfree_skb(skb);
1124 return NETDEV_TX_OK;
1125 }
1126
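/*
 * ip6_tnl_set_cap() derives IP6_TNL_F_CAP_XMIT/IP6_TNL_F_CAP_RCV from the
 * configured end-points: both addresses must be unicast or multicast,
 * neither may be loopback, and link-local addresses are only accepted if
 * the tunnel is bound to an underlying interface (p->link).  CAP_XMIT is
 * set when the local address is unicast, CAP_RCV when the remote one is.
 */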
1127 static void ip6_tnl_set_cap(struct ip6_tnl *t)
1128 {
1129 struct ip6_tnl_parm *p = &t->parms;
1130 int ltype = ipv6_addr_type(&p->laddr);
1131 int rtype = ipv6_addr_type(&p->raddr);
1132
1133 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV);
1134
1135 if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
1136 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
1137 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
1138 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
1139 if (ltype&IPV6_ADDR_UNICAST)
1140 p->flags |= IP6_TNL_F_CAP_XMIT;
1141 if (rtype&IPV6_ADDR_UNICAST)
1142 p->flags |= IP6_TNL_F_CAP_RCV;
1143 }
1144 }
1145
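/*
 * ip6_tnl_link_config() propagates the tunnel parameters to the network
 * device: it copies the end-point addresses into dev_addr/broadcast,
 * prepares the flowi6 template used on transmit, updates the capability
 * and IFF_POINTOPOINT flags, and, when a route to the remote end-point
 * exists, derives hard_header_len and a tentative MTU from the
 * underlying device.
 */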
1146 static void ip6_tnl_link_config(struct ip6_tnl *t)
1147 {
1148 struct net_device *dev = t->dev;
1149 struct ip6_tnl_parm *p = &t->parms;
1150 struct flowi6 *fl6 = &t->fl.u.ip6;
1151
1152 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1153 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1154
1155 /* Set up flowi template */
1156 ipv6_addr_copy(&fl6->saddr, &p->laddr);
1157 ipv6_addr_copy(&fl6->daddr, &p->raddr);
1158 fl6->flowi6_oif = p->link;
1159 fl6->flowlabel = 0;
1160
1161 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1162 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1163 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1164 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1165
1166 ip6_tnl_set_cap(t);
1167
1168 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1169 dev->flags |= IFF_POINTOPOINT;
1170 else
1171 dev->flags &= ~IFF_POINTOPOINT;
1172
1173 dev->iflink = p->link;
1174
1175 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1176 int strict = (ipv6_addr_type(&p->raddr) &
1177 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1178
1179 struct rt6_info *rt = rt6_lookup(dev_net(dev),
1180 &p->raddr, &p->laddr,
1181 p->link, strict);
1182
1183 if (rt == NULL)
1184 return;
1185
1186 if (rt->rt6i_dev) {
1187 dev->hard_header_len = rt->rt6i_dev->hard_header_len +
1188 sizeof (struct ipv6hdr);
1189
1190 dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr);
1191 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1192 dev->mtu-=8;
1193
1194 if (dev->mtu < IPV6_MIN_MTU)
1195 dev->mtu = IPV6_MIN_MTU;
1196 }
1197 dst_release(&rt->dst);
1198 }
1199 }
1200
1201 /**
1202 * ip6_tnl_change - update the tunnel parameters
1203 * @t: tunnel to be changed
1204 * @p: tunnel configuration parameters
1205 *
1206 * Description:
1207 * ip6_tnl_change() updates the tunnel parameters
1208 **/
1209
1210 static int
1211 ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
1212 {
1213 ipv6_addr_copy(&t->parms.laddr, &p->laddr);
1214 ipv6_addr_copy(&t->parms.raddr, &p->raddr);
1215 t->parms.flags = p->flags;
1216 t->parms.hop_limit = p->hop_limit;
1217 t->parms.encap_limit = p->encap_limit;
1218 t->parms.flowinfo = p->flowinfo;
1219 t->parms.link = p->link;
1220 t->parms.proto = p->proto;
1221 ip6_tnl_dst_reset(t);
1222 ip6_tnl_link_config(t);
1223 return 0;
1224 }
1225
1226 /**
1227 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1228 * @dev: virtual device associated with tunnel
1229 * @ifr: parameters passed from userspace
1230 * @cmd: command to be performed
1231 *
1232 * Description:
1233 * ip6_tnl_ioctl() is used for managing IPv6 tunnels
1234 * from userspace.
1235 *
1236 * The possible commands are the following:
1237 * %SIOCGETTUNNEL: get tunnel parameters for device
1238 * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1239 * %SIOCCHGTUNNEL: change tunnel parameters to those given
1240 * %SIOCDELTUNNEL: delete tunnel
1241 *
1242 * The fallback device "ip6tnl0", created during module
1243 * initialization, can be used for creating other tunnel devices.
1244 *
1245 * Return:
1246 * 0 on success,
1247 * %-EFAULT if unable to copy data to or from userspace,
1248  *   %-EPERM if the current process lacks %CAP_NET_ADMIN,
1249  *   %-EINVAL if passed tunnel parameters are invalid,
1250  *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
1251  *   %-ENODEV if attempting to change or delete a nonexistent device
1252 **/
1253
1254 static int
1255 ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1256 {
1257 int err = 0;
1258 struct ip6_tnl_parm p;
1259 struct ip6_tnl *t = NULL;
1260 struct net *net = dev_net(dev);
1261 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1262
1263 switch (cmd) {
1264 case SIOCGETTUNNEL:
1265 if (dev == ip6n->fb_tnl_dev) {
1266 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) {
1267 err = -EFAULT;
1268 break;
1269 }
1270 t = ip6_tnl_locate(net, &p, 0);
1271 }
1272 if (t == NULL)
1273 t = netdev_priv(dev);
1274 memcpy(&p, &t->parms, sizeof (p));
1275 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
1276 err = -EFAULT;
1277 }
1278 break;
1279 case SIOCADDTUNNEL:
1280 case SIOCCHGTUNNEL:
1281 err = -EPERM;
1282 if (!capable(CAP_NET_ADMIN))
1283 break;
1284 err = -EFAULT;
1285 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
1286 break;
1287 err = -EINVAL;
1288 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1289 p.proto != 0)
1290 break;
1291 t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL);
1292 if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
1293 if (t != NULL) {
1294 if (t->dev != dev) {
1295 err = -EEXIST;
1296 break;
1297 }
1298 } else
1299 t = netdev_priv(dev);
1300
1301 ip6_tnl_unlink(ip6n, t);
1302 synchronize_net();
1303 err = ip6_tnl_change(t, &p);
1304 ip6_tnl_link(ip6n, t);
1305 netdev_state_change(dev);
1306 }
1307 if (t) {
1308 err = 0;
1309 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p)))
1310 err = -EFAULT;
1311
1312 } else
1313 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1314 break;
1315 case SIOCDELTUNNEL:
1316 err = -EPERM;
1317 if (!capable(CAP_NET_ADMIN))
1318 break;
1319
1320 if (dev == ip6n->fb_tnl_dev) {
1321 err = -EFAULT;
1322 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
1323 break;
1324 err = -ENOENT;
1325 if ((t = ip6_tnl_locate(net, &p, 0)) == NULL)
1326 break;
1327 err = -EPERM;
1328 if (t->dev == ip6n->fb_tnl_dev)
1329 break;
1330 dev = t->dev;
1331 }
1332 err = 0;
1333 unregister_netdevice(dev);
1334 break;
1335 default:
1336 err = -EINVAL;
1337 }
1338 return err;
1339 }
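/*
 * For illustration, a minimal userspace sketch of the SIOCADDTUNNEL path
 * described above (not part of this file; the device name and addresses
 * are example values, and CAP_NET_ADMIN is required).  It assumes
 * <linux/if_tunnel.h> for SIOCADDTUNNEL and <linux/ip6_tunnel.h> for
 * struct ip6_tnl_parm:
 *
 *	struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6, .hop_limit = 64 };
 *	struct ifreq ifr = { };
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	strncpy(p.name, "tun6example", IFNAMSIZ - 1);
 *	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *	strncpy(ifr.ifr_name, "ip6tnl0", IFNAMSIZ - 1);
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */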
1340
1341 /**
1342 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1343 * @dev: virtual device associated with tunnel
1344 * @new_mtu: the new mtu
1345 *
1346 * Return:
1347 * 0 on success,
1348 * %-EINVAL if mtu too small
1349 **/
1350
1351 static int
1352 ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1353 {
1354 if (new_mtu < IPV6_MIN_MTU) {
1355 return -EINVAL;
1356 }
1357 dev->mtu = new_mtu;
1358 return 0;
1359 }
1360
1361
1362 static const struct net_device_ops ip6_tnl_netdev_ops = {
1363 .ndo_uninit = ip6_tnl_dev_uninit,
1364 .ndo_start_xmit = ip6_tnl_xmit,
1365 .ndo_do_ioctl = ip6_tnl_ioctl,
1366 .ndo_change_mtu = ip6_tnl_change_mtu,
1367 .ndo_get_stats = ip6_get_stats,
1368 };
1369
1370
1371 /**
1372 * ip6_tnl_dev_setup - setup virtual tunnel device
1373 * @dev: virtual device associated with tunnel
1374 *
1375 * Description:
1376 * Initialize function pointers and device parameters
1377 **/
1378
1379 static void ip6_tnl_dev_setup(struct net_device *dev)
1380 {
1381 struct ip6_tnl *t;
1382
1383 dev->netdev_ops = &ip6_tnl_netdev_ops;
1384 dev->destructor = ip6_dev_free;
1385
1386 dev->type = ARPHRD_TUNNEL6;
1387 dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
1388 dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
1389 t = netdev_priv(dev);
1390 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1391 dev->mtu-=8;
1392 dev->flags |= IFF_NOARP;
1393 dev->addr_len = sizeof(struct in6_addr);
1394 dev->features |= NETIF_F_NETNS_LOCAL;
1395 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1396 }
1397
1398
1399 /**
1400 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1401 * @dev: virtual device associated with tunnel
1402 **/
1403
1404 static inline int
1405 ip6_tnl_dev_init_gen(struct net_device *dev)
1406 {
1407 struct ip6_tnl *t = netdev_priv(dev);
1408
1409 t->dev = dev;
1410 strcpy(t->parms.name, dev->name);
1411 dev->tstats = alloc_percpu(struct pcpu_tstats);
1412 if (!dev->tstats)
1413 return -ENOMEM;
1414 return 0;
1415 }
1416
1417 /**
1418 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
1419 * @dev: virtual device associated with tunnel
1420 **/
1421
1422 static int ip6_tnl_dev_init(struct net_device *dev)
1423 {
1424 struct ip6_tnl *t = netdev_priv(dev);
1425 int err = ip6_tnl_dev_init_gen(dev);
1426
1427 if (err)
1428 return err;
1429 ip6_tnl_link_config(t);
1430 return 0;
1431 }
1432
1433 /**
1434 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1435 * @dev: fallback device
1436 *
1437 * Return: 0
1438 **/
1439
1440 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1441 {
1442 struct ip6_tnl *t = netdev_priv(dev);
1443 struct net *net = dev_net(dev);
1444 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1445 int err = ip6_tnl_dev_init_gen(dev);
1446
1447 if (err)
1448 return err;
1449
1450 t->parms.proto = IPPROTO_IPV6;
1451 dev_hold(dev);
1452 RCU_INIT_POINTER(ip6n->tnls_wc[0], t);
1453 return 0;
1454 }
1455
1456 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
1457 .handler = ip4ip6_rcv,
1458 .err_handler = ip4ip6_err,
1459 .priority = 1,
1460 };
1461
1462 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
1463 .handler = ip6ip6_rcv,
1464 .err_handler = ip6ip6_err,
1465 .priority = 1,
1466 };
1467
1468 static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1469 {
1470 int h;
1471 struct ip6_tnl *t;
1472 LIST_HEAD(list);
1473
1474 for (h = 0; h < HASH_SIZE; h++) {
1475 t = rtnl_dereference(ip6n->tnls_r_l[h]);
1476 while (t != NULL) {
1477 unregister_netdevice_queue(t->dev, &list);
1478 t = rtnl_dereference(t->next);
1479 }
1480 }
1481
1482 t = rtnl_dereference(ip6n->tnls_wc[0]);
1483 unregister_netdevice_queue(t->dev, &list);
1484 unregister_netdevice_many(&list);
1485 }
1486
1487 static int __net_init ip6_tnl_init_net(struct net *net)
1488 {
1489 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1490 int err;
1491
1492 ip6n->tnls[0] = ip6n->tnls_wc;
1493 ip6n->tnls[1] = ip6n->tnls_r_l;
1494
1495 err = -ENOMEM;
1496 ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
1497 ip6_tnl_dev_setup);
1498
1499 if (!ip6n->fb_tnl_dev)
1500 goto err_alloc_dev;
1501 dev_net_set(ip6n->fb_tnl_dev, net);
1502
1503 err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
1504 if (err < 0)
1505 goto err_register;
1506
1507 err = register_netdev(ip6n->fb_tnl_dev);
1508 if (err < 0)
1509 goto err_register;
1510 return 0;
1511
1512 err_register:
1513 ip6_dev_free(ip6n->fb_tnl_dev);
1514 err_alloc_dev:
1515 return err;
1516 }
1517
1518 static void __net_exit ip6_tnl_exit_net(struct net *net)
1519 {
1520 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1521
1522 rtnl_lock();
1523 ip6_tnl_destroy_tunnels(ip6n);
1524 rtnl_unlock();
1525 }
1526
1527 static struct pernet_operations ip6_tnl_net_ops = {
1528 .init = ip6_tnl_init_net,
1529 .exit = ip6_tnl_exit_net,
1530 .id = &ip6_tnl_net_id,
1531 .size = sizeof(struct ip6_tnl_net),
1532 };
1533
1534 /**
1535 * ip6_tunnel_init - register protocol and reserve needed resources
1536 *
1537 * Return: 0 on success
1538 **/
1539
1540 static int __init ip6_tunnel_init(void)
1541 {
1542 int err;
1543
1544 err = register_pernet_device(&ip6_tnl_net_ops);
1545 if (err < 0)
1546 goto out_pernet;
1547
1548 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
1549 if (err < 0) {
1550 printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n");
1551 goto out_ip4ip6;
1552 }
1553
1554 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
1555 if (err < 0) {
1556 printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n");
1557 goto out_ip6ip6;
1558 }
1559
1560 return 0;
1561
1562 out_ip6ip6:
1563 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
1564 out_ip4ip6:
1565 unregister_pernet_device(&ip6_tnl_net_ops);
1566 out_pernet:
1567 return err;
1568 }
1569
1570 /**
1571 * ip6_tunnel_cleanup - free resources and unregister protocol
1572 **/
1573
1574 static void __exit ip6_tunnel_cleanup(void)
1575 {
1576 if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
1577 printk(KERN_INFO "ip6_tunnel close: can't deregister ip4ip6\n");
1578
1579 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
1580 printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n");
1581
1582 unregister_pernet_device(&ip6_tnl_net_ops);
1583 }
1584
1585 module_init(ip6_tunnel_init);
1586 module_exit(ip6_tunnel_cleanup);