gre: build header correctly for collect metadata tunnels
net/ipv4/ip_gre.c
/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   with infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (sort of local ttl),
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter, since when we enter the first ndo_xmit(), cpu migration is
   forbidden. We force an exit if this counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion" in
   this case, if we copy it from the packet being encapsulated to the
   upper header. It is a very good solution, but it introduces two problems:

   - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from tunnel,
     so that this problem would be solved and traceroute output
     would even be more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)), all routers (at least, in neighbourhood of mine)
     return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work or traceroute to say something reasonable,
   we should search for another solution.

   One of them is to parse the packet, trying to detect inner encapsulation
   made by our node. It is difficult or even impossible, especially,
   taking into account fragmentation. To be short, ttl is not a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed pmtu are pruned) and tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when encapsulating packets have DF set.
   But it is not our problem! Nobody could accuse us, we made
   all that we could make. Even if it is your gated who injected
   the fatal route to the network, even if it were you who configured
   the fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */
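
/* For reference: the xmit_recursion guard described in (1) above lives in
 * the core transmit path (net/core/dev.c), not in this file.  A minimal
 * sketch of the idea, with the surrounding details elided (not the exact
 * upstream code):
 *
 *	if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
 *		goto recursion_alert;
 *	__this_cpu_inc(xmit_recursion);
 *	skb = dev_hard_start_xmit(skb, dev, txq, &rc);
 *	__this_cpu_dec(xmit_recursion);
 */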

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);

static int ipgre_net_id __read_mostly;
static int gre_tap_net_id __read_mostly;

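/* Length of the GRE header: the 4-byte base header plus one 4-byte word
 * for each optional field (checksum, key, sequence number) enabled in
 * @o_flags.
 */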
static int ip_gre_calc_hlen(__be16 o_flags)
{
	int addend = 4;

	if (o_flags & TUNNEL_CSUM)
		addend += 4;
	if (o_flags & TUNNEL_KEY)
		addend += 4;
	if (o_flags & TUNNEL_SEQ)
		addend += 4;
	return addend;
}

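/* Translate on-the-wire GRE flag bits (GRE_*) into the kernel's internal
 * TUNNEL_* representation; tnl_flags_to_gre_flags() below is the inverse.
 */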
static __be16 gre_flags_to_tnl_flags(__be16 flags)
{
	__be16 tflags = 0;

	if (flags & GRE_CSUM)
		tflags |= TUNNEL_CSUM;
	if (flags & GRE_ROUTING)
		tflags |= TUNNEL_ROUTING;
	if (flags & GRE_KEY)
		tflags |= TUNNEL_KEY;
	if (flags & GRE_SEQ)
		tflags |= TUNNEL_SEQ;
	if (flags & GRE_STRICT)
		tflags |= TUNNEL_STRICT;
	if (flags & GRE_REC)
		tflags |= TUNNEL_REC;
	if (flags & GRE_VERSION)
		tflags |= TUNNEL_VERSION;

	return tflags;
}

static __be16 tnl_flags_to_gre_flags(__be16 tflags)
{
	__be16 flags = 0;

	if (tflags & TUNNEL_CSUM)
		flags |= GRE_CSUM;
	if (tflags & TUNNEL_ROUTING)
		flags |= GRE_ROUTING;
	if (tflags & TUNNEL_KEY)
		flags |= GRE_KEY;
	if (tflags & TUNNEL_SEQ)
		flags |= GRE_SEQ;
	if (tflags & TUNNEL_STRICT)
		flags |= GRE_STRICT;
	if (tflags & TUNNEL_REC)
		flags |= GRE_REC;
	if (tflags & TUNNEL_VERSION)
		flags |= GRE_VERSION;

	return flags;
}

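/* Validate and strip the GRE header from @skb, filling @tpi with the
 * decoded flags, protocol, key and sequence number.  Returns 0 on success
 * or a negative error; *csum_err is set when an enabled GRE checksum
 * fails validation.
 */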
static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
			    bool *csum_err)
{
	const struct gre_base_hdr *greh;
	__be32 *options;
	int hdr_len;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
		return -EINVAL;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);
	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
		return -EINVAL;

	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
	hdr_len = ip_gre_calc_hlen(tpi->flags);

	if (!pskb_may_pull(skb, hdr_len))
		return -EINVAL;

	greh = (struct gre_base_hdr *)skb_transport_header(skb);
	tpi->proto = greh->protocol;

	options = (__be32 *)(greh + 1);
	if (greh->flags & GRE_CSUM) {
		if (skb_checksum_simple_validate(skb)) {
			*csum_err = true;
			return -EINVAL;
		}

		skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
					 null_compute_pseudo);
		options++;
	}

	if (greh->flags & GRE_KEY) {
		tpi->key = *options;
		options++;
	} else {
		tpi->key = 0;
	}
	if (unlikely(greh->flags & GRE_SEQ)) {
		tpi->seq = *options;
		options++;
	} else {
		tpi->seq = 0;
	}
	/* WCCP version 1 and 2 protocol decoding.
	 * - Change the protocol to IP
	 * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
	 */
	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
		tpi->proto = htons(ETH_P_IP);
		if ((*(u8 *)options & 0xF0) != 0x40) {
			hdr_len += 4;
			if (!pskb_may_pull(skb, hdr_len))
				return -EINVAL;
		}
	}
	return iptunnel_pull_header(skb, hdr_len, tpi->proto, false);
}

static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)
{
	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key to the third word
	   in the GRE header. It makes it impossible to maintain even soft
	   state for keyed GRE tunnels with enabled checksum. Tell
	   them "thank you".

	   Well, I wonder: rfc1812 was written by a Cisco employee,
	   so why the hell do these idiots break standards established
	   by themselves???
	   */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		break;

	case ICMP_REDIRECT:
		break;
	}

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return;

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}

static void gre_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key to the third word
	 * in the GRE header. It makes it impossible to maintain even soft
	 * state for keyed GRE tunnels with enabled checksum. Tell them
	 * "thank you".
	 *
	 * Well, I wonder: rfc1812 was written by a Cisco employee,
	 * so why the hell do these idiots break standards established
	 * by themselves???
	 */

	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;
	bool csum_err = false;

	if (parse_gre_header(skb, &tpi, &csum_err)) {
		if (!csum_err)		/* ignore csum errors. */
			return;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	ipgre_err(skb, info, &tpi);
}

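/* Place the 32-bit GRE key in the lower 32 bits (in network byte order) of
 * a 64-bit tunnel id.  The two #ifdef branches produce the same byte
 * layout on both endiannesses: e.g. key 0x00000001 yields the tunnel id
 * bytes 00 00 00 00 00 00 00 01.
 */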
static __be64 key_to_tunnel_id(__be32 key)
{
#ifdef __BIG_ENDIAN
	return (__force __be64)((__force u32)key);
#else
	return (__force __be64)((__force u64)key << 32);
#endif
}

/* Returns the least-significant 32 bits of a __be64. */
static __be32 tunnel_id_to_key(__be64 x)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)x;
#else
	return (__force __be32)((__force u64)x >> 32);
#endif
}

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		skb_pop_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;
}

static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	if (parse_gre_header(skb, &tpi, &csum_err) < 0)
		goto drop;

	if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
		return 0;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

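/* Fold a checksum over the GRE payload.  For CHECKSUM_PARTIAL skbs the
 * inner checksum is reused via local checksum offload (lco_csum()) instead
 * of walking the whole packet.
 */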
static __sum16 gre_checksum(struct sk_buff *skb)
{
	__wsum csum;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csum = lco_csum(skb);
	else
		csum = skb_checksum(skb, 0, skb->len, 0);
	return csum_fold(csum);
}

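/* Push a GRE header of @hdr_len bytes onto @skb and fill it in.  The
 * optional words follow the base header in wire order (checksum, key,
 * sequence number), so they are written back to front, starting from the
 * last word of the header.  For GSO skbs the checksum is left for the
 * segmentation path to compute per segment.
 */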
static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
			 __be16 proto, __be32 key, __be32 seq)
{
	struct gre_base_hdr *greh;

	skb_push(skb, hdr_len);

	skb_reset_transport_header(skb);
	greh = (struct gre_base_hdr *)skb->data;
	greh->flags = tnl_flags_to_gre_flags(flags);
	greh->protocol = proto;

	if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);

		if (flags & TUNNEL_SEQ) {
			*ptr = seq;
			ptr--;
		}
		if (flags & TUNNEL_KEY) {
			*ptr = key;
			ptr--;
		}
		if (flags & TUNNEL_CSUM &&
		    !(skb_shinfo(skb)->gso_type &
		      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
			*ptr = 0;
			*(__sum16 *)ptr = gre_checksum(skb);
		}
	}
}

static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
		     proto, tunnel->parms.o_key, htonl(tunnel->o_seqno));

	skb_set_inner_protocol(skb, proto);
	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
					   bool csum)
{
	return iptunnel_handle_offloads(skb,
					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

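/* Build an IPv4 flow from the per-packet tunnel @key and the skb mark,
 * and look up an output route for it.
 */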
static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 struct flowi4 *fl,
				 const struct ip_tunnel_key *key)
{
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
}

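/* Flow-based ("collect metadata") transmit path.  Unlike __gre_xmit(),
 * every outer-header parameter (addresses, tos, ttl, key, DF bit) comes
 * from the tunnel metadata attached to the skb rather than from the
 * device's static configuration.
 */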
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	struct flowi4 fl;
	int min_headroom;
	int tunnel_hlen;
	__be16 df, flags;
	bool use_cache;
	int err;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, &fl, key);
		if (IS_ERR(rt))
			goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl.saddr);
	}

	tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb) +
						16);
		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}

	/* Push Tunnel header. */
	skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
	if (IS_ERR(skb)) {
		skb = NULL;
		goto err_free_rt;
	}

	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
	build_header(skb, tunnel_hlen, flags, proto,
		     tunnel_id_to_key(tun_info->key.tun_id), 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

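/* ndo_fill_metadata_dst: resolve the route that the tunnel metadata
 * attached to @skb would use, and record the local source address chosen
 * by routing so that the caller sees the complete outer header.
 */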
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = gre_get_rt(skb, dev, &fl4, &info->key);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM));
	if (IS_ERR(skb))
		goto out;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
out:
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM));
	if (IS_ERR(skb))
		goto out;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
out:
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

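/* ioctl-based (ip tunnel) configuration.  Userspace supplies GRE_* wire
 * flags, so they are converted to TUNNEL_* before the generic
 * ip_tunnel_ioctl() call and converted back before copying the result out.
 */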
static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	int err;
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;
	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}
	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	p.i_flags = tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;
	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.


   I have no idea whether this bicycle was invented before me,
   so that I had to set ARPHRD_IPGRE to a random value.
   I have an impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};

#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipgre_netdev_ops;
	dev->type = ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

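/* Common device init for both gre and gretap.  The reserved headroom is
 * link layer + outer IP + GRE (plus 4 spare bytes), and the default MTU is
 * ETH_DATA_LEN reduced by the same tunnel overhead.
 */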
static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	int t_hlen;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = ip_gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
	dev->mtu = ETH_DATA_LEN - t_hlen - 4;

	dev->features |= GRE_FEATURES;
	dev->hw_features |= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags = IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len = 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);

	ip_tunnel_delete_net(itn, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

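/* Netlink validation: reject flag combinations this driver cannot handle,
 * i.e. source routing (GRE_ROUTING) and any non-zero GRE version.
 */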
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data);
}

static void ipgre_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->collect_md = true;
	}
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipgre_netlink_parms(dev, data, tb, &p);
	return ip_tunnel_newlink(dev, tb, &p);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	ipgre_netlink_parms(dev, data, tb, &p);
	return ip_tunnel_changelink(dev, tb, &p);
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS, tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS, tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL);
	if (err < 0)
		goto out;

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	return dev;
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);

	ip_tunnel_delete_net(itn, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit = ipgre_tap_exit_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	return 0;

tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}


static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");