c12b395a |
1 | /* |
2 | * GRE over IPv6 protocol decoder. |
3 | * |
4 | * Authors: Dmitry Kozlov (xeb@mail.ru) |
5 | * |
6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License |
8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. |
10 | * |
11 | */ |
12 | |
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
14 | |
15 | #include <linux/capability.h> |
16 | #include <linux/module.h> |
17 | #include <linux/types.h> |
18 | #include <linux/kernel.h> |
19 | #include <linux/slab.h> |
20 | #include <linux/uaccess.h> |
21 | #include <linux/skbuff.h> |
22 | #include <linux/netdevice.h> |
23 | #include <linux/in.h> |
24 | #include <linux/tcp.h> |
25 | #include <linux/udp.h> |
26 | #include <linux/if_arp.h> |
27 | #include <linux/mroute.h> |
28 | #include <linux/init.h> |
29 | #include <linux/in6.h> |
30 | #include <linux/inetdevice.h> |
31 | #include <linux/igmp.h> |
32 | #include <linux/netfilter_ipv4.h> |
33 | #include <linux/etherdevice.h> |
34 | #include <linux/if_ether.h> |
35 | #include <linux/hash.h> |
36 | #include <linux/if_tunnel.h> |
37 | #include <linux/ip6_tunnel.h> |
38 | |
39 | #include <net/sock.h> |
40 | #include <net/ip.h> |
41 | #include <net/icmp.h> |
42 | #include <net/protocol.h> |
43 | #include <net/addrconf.h> |
44 | #include <net/arp.h> |
45 | #include <net/checksum.h> |
46 | #include <net/dsfield.h> |
47 | #include <net/inet_ecn.h> |
48 | #include <net/xfrm.h> |
49 | #include <net/net_namespace.h> |
50 | #include <net/netns/generic.h> |
51 | #include <net/rtnetlink.h> |
52 | |
53 | #include <net/ipv6.h> |
54 | #include <net/ip6_fib.h> |
55 | #include <net/ip6_route.h> |
56 | #include <net/ip6_tunnel.h> |
57 | |
58 | |
/* Traffic-class bits of an IPv6 flowinfo word (flowinfo minus flow label) */
#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT 20

/* Each of the four tunnel lookup tables has 2^5 = 32 buckets */
#define HASH_SIZE_SHIFT 5
#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
64 | |
/* Index of our per-namespace state for net_generic() */
static int ip6gre_net_id __read_mostly;
/* Per-network-namespace state: tunnel hash tables and the fallback device */
struct ip6gre_net {
	/* [prio][bucket]: prio encodes which endpoints are wildcarded, see below */
	struct ip6_tnl __rcu *tunnels[4][HASH_SIZE];

	/* catch-all device that receives traffic not matching any tunnel */
	struct net_device *fb_tunnel_dev;
};
71 | |
/* Forward declarations for the rtnl_link_ops and device setup plumbing */
static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
static int ip6gre_tunnel_init(struct net_device *dev);
static void ip6gre_tunnel_setup(struct net_device *dev);
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
77 | |
78 | /* Tunnel hash table */ |
79 | |
80 | /* |
81 | 4 hash tables: |
82 | |
83 | 3: (remote,local) |
84 | 2: (remote,*) |
85 | 1: (*,local) |
86 | 0: (*,*) |
87 | |
88 | We require exact key match i.e. if a key is present in packet |
89 | it will match only tunnel with the same key; if it is not present, |
90 | it will match only keyless tunnel. |
91 | |
  All keyless packets, if not matched against a configured keyless tunnel,
  will match the fallback tunnel.
94 | */ |
95 | |
/* Fold a 32-bit GRE key into a bucket index */
#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(HASH_SIZE - 1))
/* Fold an IPv6 address into a bucket index */
static u32 HASH_ADDR(const struct in6_addr *addr)
{
	u32 hash = ipv6_addr_hash(addr);

	return hash_32(hash, HASH_SIZE_SHIFT);
}
103 | |
/* Readable aliases for the four tables, by wildcard combination */
#define tunnels_r_l tunnels[3]
#define tunnels_r tunnels[2]
#define tunnels_l tunnels[1]
#define tunnels_wc tunnels[0]
/*
 * Locking : hash tables are protected by RCU and RTNL
 */

/* Walk an RCU-protected tunnel chain; expects 'struct ip6_tnl *t' in scope */
#define for_each_ip_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
114 | |
115 | /* often modified stats are per cpu, other are shared (netdev->stats) */ |
struct pcpu_tstats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	/* lets readers take a consistent snapshot of the 64-bit counters */
	struct u64_stats_sync syncp;
};
123 | |
124 | static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev, |
125 | struct rtnl_link_stats64 *tot) |
126 | { |
127 | int i; |
128 | |
129 | for_each_possible_cpu(i) { |
130 | const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); |
131 | u64 rx_packets, rx_bytes, tx_packets, tx_bytes; |
132 | unsigned int start; |
133 | |
134 | do { |
135 | start = u64_stats_fetch_begin_bh(&tstats->syncp); |
136 | rx_packets = tstats->rx_packets; |
137 | tx_packets = tstats->tx_packets; |
138 | rx_bytes = tstats->rx_bytes; |
139 | tx_bytes = tstats->tx_bytes; |
140 | } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); |
141 | |
142 | tot->rx_packets += rx_packets; |
143 | tot->tx_packets += tx_packets; |
144 | tot->rx_bytes += rx_bytes; |
145 | tot->tx_bytes += tx_bytes; |
146 | } |
147 | |
148 | tot->multicast = dev->stats.multicast; |
149 | tot->rx_crc_errors = dev->stats.rx_crc_errors; |
150 | tot->rx_fifo_errors = dev->stats.rx_fifo_errors; |
151 | tot->rx_length_errors = dev->stats.rx_length_errors; |
152 | tot->rx_errors = dev->stats.rx_errors; |
153 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; |
154 | tot->tx_carrier_errors = dev->stats.tx_carrier_errors; |
155 | tot->tx_dropped = dev->stats.tx_dropped; |
156 | tot->tx_aborted_errors = dev->stats.tx_aborted_errors; |
157 | tot->tx_errors = dev->stats.tx_errors; |
158 | |
159 | return tot; |
160 | } |
161 | |
162 | /* Given src, dst and key, find appropriate for input tunnel. */ |
163 | |
/*
 * Find the best receiving tunnel for (remote, local, key, gre_proto).
 *
 * The four tables are probed from most to least specific.  Within each
 * table a match is scored: bit 0 set if the arrival interface differs,
 * bit 1 set if the device type differs (ARPHRD_IP6GRE devices also
 * accept ARPHRD_ETHER traffic).  Score 0 is a perfect match and is
 * returned immediately; otherwise the lowest-scoring candidate across
 * all tables wins.  If nothing matches, the fallback device is used
 * when it is up.  Runs under rcu_read_lock().
 */
static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
		const struct in6_addr *remote, const struct in6_addr *local,
		__be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned int h0 = HASH_ADDR(remote);
	unsigned int h1 = HASH_KEY(key);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
		       ARPHRD_ETHER : ARPHRD_IP6GRE;
	int score, cand_score = 4;

	/* 1: fully specified (remote, local) tunnels */
	for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	/* 2: (remote, *) tunnels */
	for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	/* 3: (*, local) tunnels; a multicast destination may also match
	 * a tunnel whose *remote* address is that multicast group.
	 */
	for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
		if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
			  (!ipv6_addr_equal(local, &t->parms.raddr) ||
			   !ipv6_addr_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	/* 4: (*, *) tunnels, matched on key alone */
	for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IP6GRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	/* best imperfect match, if any */
	if (cand != NULL)
		return cand;

	/* fall back to the catch-all device when it is up */
	dev = ign->fb_tunnel_dev;
	if (dev->flags & IFF_UP)
		return netdev_priv(dev);

	return NULL;
}
285 | |
286 | static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign, |
287 | const struct __ip6_tnl_parm *p) |
288 | { |
289 | const struct in6_addr *remote = &p->raddr; |
290 | const struct in6_addr *local = &p->laddr; |
291 | unsigned int h = HASH_KEY(p->i_key); |
292 | int prio = 0; |
293 | |
294 | if (!ipv6_addr_any(local)) |
295 | prio |= 1; |
296 | if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) { |
297 | prio |= 2; |
298 | h ^= HASH_ADDR(remote); |
299 | } |
300 | |
301 | return &ign->tunnels[prio][h]; |
302 | } |
303 | |
/* Hash-chain head for an existing tunnel, keyed by its current parameters */
static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
		const struct ip6_tnl *t)
{
	return __ip6gre_bucket(ign, &t->parms);
}
309 | |
/*
 * Insert @t at the head of its hash chain.  Caller holds RTNL.
 * t->next is published before *tp so concurrent RCU readers never see
 * a half-linked entry.
 */
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);

	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}
317 | |
/*
 * Remove @t from its hash chain, if present.  Caller holds RTNL.
 * Walks the chain with a pointer-to-link so the predecessor's next
 * pointer can be spliced over @t in one RCU-safe store.
 */
static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	for (tp = ip6gre_bucket(ign, t);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}
332 | |
333 | static struct ip6_tnl *ip6gre_tunnel_find(struct net *net, |
334 | const struct __ip6_tnl_parm *parms, |
335 | int type) |
336 | { |
337 | const struct in6_addr *remote = &parms->raddr; |
338 | const struct in6_addr *local = &parms->laddr; |
339 | __be32 key = parms->i_key; |
340 | int link = parms->link; |
341 | struct ip6_tnl *t; |
342 | struct ip6_tnl __rcu **tp; |
343 | struct ip6gre_net *ign = net_generic(net, ip6gre_net_id); |
344 | |
345 | for (tp = __ip6gre_bucket(ign, parms); |
346 | (t = rtnl_dereference(*tp)) != NULL; |
347 | tp = &t->next) |
348 | if (ipv6_addr_equal(local, &t->parms.laddr) && |
349 | ipv6_addr_equal(remote, &t->parms.raddr) && |
350 | key == t->parms.i_key && |
351 | link == t->parms.link && |
352 | type == t->dev->type) |
353 | break; |
354 | |
355 | return t; |
356 | } |
357 | |
/*
 * Look up a tunnel matching @parms; when none exists and @create is
 * set, allocate, register and link a new net_device for it.
 * Caller holds RTNL.  Returns the tunnel, or NULL on lookup miss
 * (!create) or allocation/registration failure.
 */
static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
		const struct __ip6_tnl_parm *parms, int create)
{
	struct ip6_tnl *t, *nt;
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
	if (t || !create)
		return t;

	/* use the requested name, or let the core pick ip6gre<N> */
	if (parms->name[0])
		strlcpy(name, parms->name, IFNAMSIZ);
	else
		strcpy(name, "ip6gre%d");

	dev = alloc_netdev(sizeof(*t), name, ip6gre_tunnel_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	nt = netdev_priv(dev);
	nt->parms = *parms;
	dev->rtnl_link_ops = &ip6gre_link_ops;

	nt->dev = dev;
	ip6gre_tnl_link_config(nt, 1);

	if (register_netdevice(dev) < 0)
		goto failed_free;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	/* reference dropped in ip6gre_tunnel_uninit() */
	dev_hold(dev);
	ip6gre_tunnel_link(ign, nt);
	return nt;

failed_free:
	free_netdev(dev);
	return NULL;
}
403 | |
/*
 * ndo_uninit: unhash the tunnel and drop the reference taken by
 * dev_hold() when it was created.
 */
static void ip6gre_tunnel_uninit(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	ip6gre_tunnel_unlink(ign, netdev_priv(dev));
	dev_put(dev);
}
412 | |
413 | |
414 | static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
415 | u8 type, u8 code, int offset, __be32 info) |
416 | { |
417 | const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data; |
418 | __be16 *p = (__be16 *)(ipv6h + 1); |
419 | int grehlen = sizeof(ipv6h) + 4; |
420 | struct ip6_tnl *t; |
421 | __be16 flags; |
422 | |
423 | flags = p[0]; |
424 | if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) { |
425 | if (flags&(GRE_VERSION|GRE_ROUTING)) |
426 | return; |
427 | if (flags&GRE_KEY) { |
428 | grehlen += 4; |
429 | if (flags&GRE_CSUM) |
430 | grehlen += 4; |
431 | } |
432 | } |
433 | |
434 | /* If only 8 bytes returned, keyed message will be dropped here */ |
435 | if (skb_headlen(skb) < grehlen) |
436 | return; |
437 | |
438 | rcu_read_lock(); |
439 | |
440 | t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr, |
441 | flags & GRE_KEY ? |
442 | *(((__be32 *)p) + (grehlen / 4) - 1) : 0, |
443 | p[1]); |
444 | if (t == NULL) |
445 | goto out; |
446 | |
447 | switch (type) { |
448 | __u32 teli; |
449 | struct ipv6_tlv_tnl_enc_lim *tel; |
450 | __u32 mtu; |
451 | case ICMPV6_DEST_UNREACH: |
452 | net_warn_ratelimited("%s: Path to destination invalid or inactive!\n", |
453 | t->parms.name); |
454 | break; |
455 | case ICMPV6_TIME_EXCEED: |
456 | if (code == ICMPV6_EXC_HOPLIMIT) { |
457 | net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", |
458 | t->parms.name); |
459 | } |
460 | break; |
461 | case ICMPV6_PARAMPROB: |
462 | teli = 0; |
463 | if (code == ICMPV6_HDR_FIELD) |
464 | teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data); |
465 | |
466 | if (teli && teli == info - 2) { |
467 | tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli]; |
468 | if (tel->encap_limit == 0) { |
469 | net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n", |
470 | t->parms.name); |
471 | } |
472 | } else { |
473 | net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n", |
474 | t->parms.name); |
475 | } |
476 | break; |
477 | case ICMPV6_PKT_TOOBIG: |
478 | mtu = info - offset; |
479 | if (mtu < IPV6_MIN_MTU) |
480 | mtu = IPV6_MIN_MTU; |
481 | t->dev->mtu = mtu; |
482 | break; |
483 | } |
484 | |
485 | if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO)) |
486 | t->err_count++; |
487 | else |
488 | t->err_count = 1; |
489 | t->err_time = jiffies; |
490 | out: |
491 | rcu_read_unlock(); |
492 | } |
493 | |
/*
 * Propagate outer-header DSCP/ECN to a decapsulated IPv4 payload:
 * optionally copy the outer DSCP, and mark Congestion Experienced on
 * the inner header when the outer header carried CE.
 */
static inline void ip6gre_ecn_decapsulate_ipv4(const struct ip6_tnl *t,
		const struct ipv6hdr *ipv6h, struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	if (INET_ECN_is_ce(dsfield))
		IP_ECN_set_ce(ip_hdr(skb));
}
505 | |
/*
 * Same as above, for an IPv6 payload: optionally copy the outer DSCP
 * and set CE on the inner header when the outer header carried CE.
 */
static inline void ip6gre_ecn_decapsulate_ipv6(const struct ip6_tnl *t,
		const struct ipv6hdr *ipv6h, struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
		IP6_ECN_set_ce(ipv6_hdr(skb));
}
515 | |
/*
 * Receive handler for GRE-in-IPv6 packets.  skb->data points at the
 * GRE header (the IPv6 header has already been pulled).  Parses the
 * optional checksum/key/sequence words, finds the matching tunnel,
 * validates checksum and sequence, strips the GRE header and hands the
 * inner packet to the stack via the tunnel device.  Unmatched packets
 * get an ICMPv6 port-unreachable.  Always returns 0 (packet consumed).
 */
static int ip6gre_rcv(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	u8     *h;
	__be16    flags;
	__sum16   csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip6_tnl *tunnel;
	int    offset = 4;
	__be16 gre_proto;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		goto drop_nolock;

	ipv6h = ipv6_hdr(skb);
	h = skb->data;
	flags = *(__be16 *)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop_nolock;

		if (flags&GRE_CSUM) {
			/* verify the GRE checksum over the whole packet */
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	/* protocol field sits right after the 2 flag bytes */
	gre_proto = *(__be16 *)(h + 2);

	rcu_read_lock();
	tunnel = ip6gre_tunnel_lookup(skb->dev,
					  &ipv6h->saddr, &ipv6h->daddr, key,
					  gre_proto);
	if (tunnel) {
		struct pcpu_tstats *tstats;

		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;

		/* tunnel-level receive policy (addresses, device state) */
		if (!ip6_tnl_rcv_ctl(tunnel, &ipv6h->daddr, &ipv6h->saddr)) {
			tunnel->dev->stats.rx_dropped++;
			goto drop;
		}

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		/* strip the GRE header (offset bytes) before delivery */
		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;

		/* checksum must be valid when present, and present when
		 * the tunnel is configured to expect one */
		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->dev->stats.rx_crc_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		/* enforce monotonically increasing sequence numbers */
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno &&
					(s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->dev->stats.rx_fifo_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				tunnel->dev->stats.rx_length_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}

			ipv6h = ipv6_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		u64_stats_update_begin(&tstats->syncp);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;
		u64_stats_update_end(&tstats->syncp);

		__skb_tunnel_rx(skb, tunnel->dev);

		skb_reset_network_header(skb);
		if (skb->protocol == htons(ETH_P_IP))
			ip6gre_ecn_decapsulate_ipv4(tunnel, ipv6h, skb);
		else if (skb->protocol == htons(ETH_P_IPV6))
			ip6gre_ecn_decapsulate_ipv6(tunnel, ipv6h, skb);

		netif_rx(skb);

		rcu_read_unlock();
		return 0;
	}
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

drop:
	rcu_read_unlock();
drop_nolock:
	kfree_skb(skb);
	return 0;
}
658 | |
/* Destination-options carrier for the tunnel encapsulation limit TLV */
struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	/* option header (2) + TEL TLV (3) + PadN padding (3) = 8 bytes */
	__u8 dst_opt[8];
};
663 | |
/*
 * Build a destination-options header carrying the Tunnel Encapsulation
 * Limit option with value @encap_limit, padded to 8 bytes with PadN.
 * Bytes 0-1 form the ipv6_opt_hdr (next-header/length) and are left
 * zero here — presumably filled in when the option is pushed; confirm
 * against ipv6_push_nfrag_opts.
 */
static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;	/* option type */
	opt->dst_opt[3] = 1;				/* option length */
	opt->dst_opt[4] = encap_limit;			/* the limit itself */
	opt->dst_opt[5] = IPV6_TLV_PADN;		/* PadN filler */
	opt->dst_opt[6] = 1;

	opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
677 | |
678 | static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb, |
679 | struct net_device *dev, |
680 | __u8 dsfield, |
681 | struct flowi6 *fl6, |
682 | int encap_limit, |
683 | __u32 *pmtu) |
684 | { |
685 | struct net *net = dev_net(dev); |
686 | struct ip6_tnl *tunnel = netdev_priv(dev); |
687 | struct net_device *tdev; /* Device to other host */ |
688 | struct ipv6hdr *ipv6h; /* Our new IP header */ |
689 | unsigned int max_headroom; /* The extra header space needed */ |
690 | int gre_hlen; |
691 | struct ipv6_tel_txoption opt; |
692 | int mtu; |
693 | struct dst_entry *dst = NULL, *ndst = NULL; |
694 | struct net_device_stats *stats = &tunnel->dev->stats; |
695 | int err = -1; |
696 | u8 proto; |
697 | int pkt_len; |
698 | struct sk_buff *new_skb; |
699 | |
700 | if (dev->type == ARPHRD_ETHER) |
701 | IPCB(skb)->flags = 0; |
702 | |
703 | if (dev->header_ops && dev->type == ARPHRD_IP6GRE) { |
704 | gre_hlen = 0; |
705 | ipv6h = (struct ipv6hdr *)skb->data; |
706 | fl6->daddr = ipv6h->daddr; |
707 | } else { |
708 | gre_hlen = tunnel->hlen; |
709 | fl6->daddr = tunnel->parms.raddr; |
710 | } |
711 | |
712 | if (!fl6->flowi6_mark) |
713 | dst = ip6_tnl_dst_check(tunnel); |
714 | |
715 | if (!dst) { |
716 | ndst = ip6_route_output(net, NULL, fl6); |
717 | |
718 | if (ndst->error) |
719 | goto tx_err_link_failure; |
720 | ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0); |
721 | if (IS_ERR(ndst)) { |
722 | err = PTR_ERR(ndst); |
723 | ndst = NULL; |
724 | goto tx_err_link_failure; |
725 | } |
726 | dst = ndst; |
727 | } |
728 | |
729 | tdev = dst->dev; |
730 | |
731 | if (tdev == dev) { |
732 | stats->collisions++; |
733 | net_warn_ratelimited("%s: Local routing loop detected!\n", |
734 | tunnel->parms.name); |
735 | goto tx_err_dst_release; |
736 | } |
737 | |
738 | mtu = dst_mtu(dst) - sizeof(*ipv6h); |
739 | if (encap_limit >= 0) { |
740 | max_headroom += 8; |
741 | mtu -= 8; |
742 | } |
743 | if (mtu < IPV6_MIN_MTU) |
744 | mtu = IPV6_MIN_MTU; |
745 | if (skb_dst(skb)) |
746 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); |
747 | if (skb->len > mtu) { |
748 | *pmtu = mtu; |
749 | err = -EMSGSIZE; |
750 | goto tx_err_dst_release; |
751 | } |
752 | |
753 | if (tunnel->err_count > 0) { |
754 | if (time_before(jiffies, |
755 | tunnel->err_time + IP6TUNNEL_ERR_TIMEO)) { |
756 | tunnel->err_count--; |
757 | |
758 | dst_link_failure(skb); |
759 | } else |
760 | tunnel->err_count = 0; |
761 | } |
762 | |
763 | max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len; |
764 | |
765 | if (skb_headroom(skb) < max_headroom || skb_shared(skb) || |
766 | (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { |
767 | new_skb = skb_realloc_headroom(skb, max_headroom); |
768 | if (max_headroom > dev->needed_headroom) |
769 | dev->needed_headroom = max_headroom; |
770 | if (!new_skb) |
771 | goto tx_err_dst_release; |
772 | |
773 | if (skb->sk) |
774 | skb_set_owner_w(new_skb, skb->sk); |
775 | consume_skb(skb); |
776 | skb = new_skb; |
777 | } |
778 | |
779 | skb_dst_drop(skb); |
780 | |
781 | if (fl6->flowi6_mark) { |
782 | skb_dst_set(skb, dst); |
783 | ndst = NULL; |
784 | } else { |
785 | skb_dst_set_noref(skb, dst); |
786 | } |
787 | |
788 | skb->transport_header = skb->network_header; |
789 | |
790 | proto = NEXTHDR_GRE; |
791 | if (encap_limit >= 0) { |
792 | init_tel_txopt(&opt, encap_limit); |
793 | ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); |
794 | } |
795 | |
796 | skb_push(skb, gre_hlen); |
797 | skb_reset_network_header(skb); |
798 | |
799 | /* |
800 | * Push down and install the IP header. |
801 | */ |
802 | ipv6h = ipv6_hdr(skb); |
803 | *(__be32 *)ipv6h = fl6->flowlabel | htonl(0x60000000); |
804 | dsfield = INET_ECN_encapsulate(0, dsfield); |
805 | ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield); |
806 | ipv6h->hop_limit = tunnel->parms.hop_limit; |
807 | ipv6h->nexthdr = proto; |
808 | ipv6h->saddr = fl6->saddr; |
809 | ipv6h->daddr = fl6->daddr; |
810 | |
811 | ((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags; |
812 | ((__be16 *)(ipv6h + 1))[1] = (dev->type == ARPHRD_ETHER) ? |
813 | htons(ETH_P_TEB) : skb->protocol; |
814 | |
815 | if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) { |
816 | __be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4); |
817 | |
818 | if (tunnel->parms.o_flags&GRE_SEQ) { |
819 | ++tunnel->o_seqno; |
820 | *ptr = htonl(tunnel->o_seqno); |
821 | ptr--; |
822 | } |
823 | if (tunnel->parms.o_flags&GRE_KEY) { |
824 | *ptr = tunnel->parms.o_key; |
825 | ptr--; |
826 | } |
827 | if (tunnel->parms.o_flags&GRE_CSUM) { |
828 | *ptr = 0; |
829 | *(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1), |
830 | skb->len - sizeof(struct ipv6hdr)); |
831 | } |
832 | } |
833 | |
834 | nf_reset(skb); |
835 | pkt_len = skb->len; |
836 | err = ip6_local_out(skb); |
837 | |
838 | if (net_xmit_eval(err) == 0) { |
839 | struct pcpu_tstats *tstats = this_cpu_ptr(tunnel->dev->tstats); |
840 | |
841 | tstats->tx_bytes += pkt_len; |
842 | tstats->tx_packets++; |
843 | } else { |
844 | stats->tx_errors++; |
845 | stats->tx_aborted_errors++; |
846 | } |
847 | |
848 | if (ndst) |
849 | ip6_tnl_dst_store(tunnel, ndst); |
850 | |
851 | return 0; |
852 | tx_err_link_failure: |
853 | stats->tx_carrier_errors++; |
854 | dst_link_failure(skb); |
855 | tx_err_dst_release: |
856 | dst_release(ndst); |
857 | return err; |
858 | } |
859 | |
860 | static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) |
861 | { |
862 | struct ip6_tnl *t = netdev_priv(dev); |
863 | const struct iphdr *iph = ip_hdr(skb); |
864 | int encap_limit = -1; |
865 | struct flowi6 fl6; |
866 | __u8 dsfield; |
867 | __u32 mtu; |
868 | int err; |
869 | |
870 | if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) |
871 | encap_limit = t->parms.encap_limit; |
872 | |
873 | memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); |
874 | fl6.flowi6_proto = IPPROTO_IPIP; |
875 | |
876 | dsfield = ipv4_get_dsfield(iph); |
877 | |
878 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) |
879 | fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT) |
880 | & IPV6_TCLASS_MASK; |
881 | if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) |
882 | fl6.flowi6_mark = skb->mark; |
883 | |
884 | err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu); |
885 | if (err != 0) { |
886 | /* XXX: send ICMP error even if DF is not set. */ |
887 | if (err == -EMSGSIZE) |
888 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, |
889 | htonl(mtu)); |
890 | return -1; |
891 | } |
892 | |
893 | return 0; |
894 | } |
895 | |
/*
 * Encapsulate an IPv6 packet.  Rejects trivial loops (inner source ==
 * tunnel exit point), honours a tunnel-encapsulation-limit option in
 * the inner packet, optionally copies class/flowlabel/fwmark to the
 * outer flow, then hands off to ip6gre_xmit2().  On -EMSGSIZE an
 * ICMPv6 packet-too-big is sent back.  Returns 0 on success, -1 on
 * failure.
 */
static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield;
	__u32 mtu;
	int err;

	/* don't tunnel a packet that came from our own exit point */
	if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
		return -1;

	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
	if (offset > 0) {
		/* inner packet carries its own encapsulation limit */
		struct ipv6_tlv_tnl_enc_lim *tel;
		tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
		if (tel->encap_limit == 0) {
			/* limit exhausted: report back to the sender */
			icmpv6_send(skb, ICMPV6_PARAMPROB,
				    ICMPV6_HDR_FIELD, offset + 2);
			return -1;
		}
		encap_limit = tel->encap_limit - 1;
	} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		encap_limit = t->parms.encap_limit;

	memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_IPV6;

	/* optionally propagate inner class/flowlabel/fwmark */
	dsfield = ipv6_get_dsfield(ipv6h);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
		fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
	if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
		fl6.flowi6_mark = skb->mark;

	err = ip6gre_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
	if (err != 0) {
		if (err == -EMSGSIZE)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		return -1;
	}

	return 0;
}
943 | |
944 | /** |
945 | * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own |
946 | * @t: the outgoing tunnel device |
947 | * @hdr: IPv6 header from the incoming packet |
948 | * |
949 | * Description: |
950 | * Avoid trivial tunneling loop by checking that tunnel exit-point |
951 | * doesn't match source of incoming packet. |
952 | * |
953 | * Return: |
954 | * 1 if conflict, |
955 | * 0 else |
956 | **/ |
957 | |
958 | static inline bool ip6gre_tnl_addr_conflict(const struct ip6_tnl *t, |
959 | const struct ipv6hdr *hdr) |
960 | { |
961 | return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); |
962 | } |
963 | |
964 | static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev) |
965 | { |
966 | struct ip6_tnl *t = netdev_priv(dev); |
967 | int encap_limit = -1; |
968 | struct flowi6 fl6; |
969 | __u32 mtu; |
970 | int err; |
971 | |
972 | if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) |
973 | encap_limit = t->parms.encap_limit; |
974 | |
975 | memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); |
976 | fl6.flowi6_proto = skb->protocol; |
977 | |
978 | err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu); |
979 | |
980 | return err; |
981 | } |
982 | |
983 | static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb, |
984 | struct net_device *dev) |
985 | { |
986 | struct ip6_tnl *t = netdev_priv(dev); |
987 | struct net_device_stats *stats = &t->dev->stats; |
988 | int ret; |
989 | |
990 | if (!ip6_tnl_xmit_ctl(t)) |
991 | return -1; |
992 | |
993 | switch (skb->protocol) { |
994 | case htons(ETH_P_IP): |
995 | ret = ip6gre_xmit_ipv4(skb, dev); |
996 | break; |
997 | case htons(ETH_P_IPV6): |
998 | ret = ip6gre_xmit_ipv6(skb, dev); |
999 | break; |
1000 | default: |
1001 | ret = ip6gre_xmit_other(skb, dev); |
1002 | break; |
1003 | } |
1004 | |
1005 | if (ret < 0) |
1006 | goto tx_err; |
1007 | |
1008 | return NETDEV_TX_OK; |
1009 | |
1010 | tx_err: |
1011 | stats->tx_errors++; |
1012 | stats->tx_dropped++; |
1013 | kfree_skb(skb); |
1014 | return NETDEV_TX_OK; |
1015 | } |
1016 | |
/*
 * Recompute everything derived from the tunnel parameters: device
 * addresses, the flowi template used for routing, point-to-point
 * flags, the precalculated GRE header length (t->hlen) and — when
 * @set_mtu is non-zero — the device MTU based on the current route to
 * the peer.  Called on creation and whenever parameters change.
 */
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
	struct net_device *dev = t->dev;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	/* outer IPv6 header plus the 4 mandatory GRE bytes */
	int addend = sizeof(struct ipv6hdr) + 4;

	if (dev->type != ARPHRD_ETHER) {
		memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
		memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
	}

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	/* fixed class/flowlabel only when not copied from each packet */
	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT &&
			p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	dev->iflink = p->link;

	/* Precalculate GRE options length */
	if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (t->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (t->parms.o_flags&GRE_KEY)
			addend += 4;
		if (t->parms.o_flags&GRE_SEQ)
			addend += 4;
	}

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(dev_net(dev),
						 &p->raddr, &p->laddr,
						 p->link, strict);

		if (rt == NULL)
			return;

		/* size our header room and MTU from the egress device */
		if (rt->dst.dev) {
			dev->hard_header_len = rt->dst.dev->hard_header_len + addend;

			if (set_mtu) {
				dev->mtu = rt->dst.dev->mtu - addend;
				/* encap-limit option costs another 8 bytes */
				if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
					dev->mtu -= 8;

				if (dev->mtu < IPV6_MIN_MTU)
					dev->mtu = IPV6_MIN_MTU;
			}
		}
		dst_release(&rt->dst);
	}

	t->hlen = addend;
}
1089 | |
1090 | static int ip6gre_tnl_change(struct ip6_tnl *t, |
1091 | const struct __ip6_tnl_parm *p, int set_mtu) |
1092 | { |
1093 | t->parms.laddr = p->laddr; |
1094 | t->parms.raddr = p->raddr; |
1095 | t->parms.flags = p->flags; |
1096 | t->parms.hop_limit = p->hop_limit; |
1097 | t->parms.encap_limit = p->encap_limit; |
1098 | t->parms.flowinfo = p->flowinfo; |
1099 | t->parms.link = p->link; |
1100 | t->parms.proto = p->proto; |
1101 | t->parms.i_key = p->i_key; |
1102 | t->parms.o_key = p->o_key; |
1103 | t->parms.i_flags = p->i_flags; |
1104 | t->parms.o_flags = p->o_flags; |
1105 | ip6_tnl_dst_reset(t); |
1106 | ip6gre_tnl_link_config(t, set_mtu); |
1107 | return 0; |
1108 | } |
1109 | |
1110 | static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p, |
1111 | const struct ip6_tnl_parm2 *u) |
1112 | { |
1113 | p->laddr = u->laddr; |
1114 | p->raddr = u->raddr; |
1115 | p->flags = u->flags; |
1116 | p->hop_limit = u->hop_limit; |
1117 | p->encap_limit = u->encap_limit; |
1118 | p->flowinfo = u->flowinfo; |
1119 | p->link = u->link; |
1120 | p->i_key = u->i_key; |
1121 | p->o_key = u->o_key; |
1122 | p->i_flags = u->i_flags; |
1123 | p->o_flags = u->o_flags; |
1124 | memcpy(p->name, u->name, sizeof(u->name)); |
1125 | } |
1126 | |
1127 | static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u, |
1128 | const struct __ip6_tnl_parm *p) |
1129 | { |
1130 | u->proto = IPPROTO_GRE; |
1131 | u->laddr = p->laddr; |
1132 | u->raddr = p->raddr; |
1133 | u->flags = p->flags; |
1134 | u->hop_limit = p->hop_limit; |
1135 | u->encap_limit = p->encap_limit; |
1136 | u->flowinfo = p->flowinfo; |
1137 | u->link = p->link; |
1138 | u->i_key = p->i_key; |
1139 | u->o_key = p->o_key; |
1140 | u->i_flags = p->i_flags; |
1141 | u->o_flags = p->o_flags; |
1142 | memcpy(u->name, p->name, sizeof(u->name)); |
1143 | } |
1144 | |
/* Legacy tunnel ioctl interface (SIOC{GET,ADD,CHG,DEL}TUNNEL).
 * Operations on the per-netns fallback device take the target tunnel
 * from the user-supplied parameter block; on a regular tunnel device
 * they act on that device itself.
 */
static int ip6gre_tunnel_ioctl(struct net_device *dev,
	struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm2 p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t;
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			/* On the fallback device, look up the tunnel
			 * described by the user's parameters. */
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		ip6gre_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		/* GRE version and routing bits are unsupported. */
		if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
			goto done;

		/* Keys are meaningless without the corresponding flag. */
		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		ip6gre_tnl_parm_from_user(&p1, &p);
		t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				/* New parameters collide with another
				 * existing tunnel. */
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				t = netdev_priv(dev);

				/* Re-hash this tunnel under the new
				 * parameters. */
				ip6gre_tunnel_unlink(ign, t);
				synchronize_net();
				ip6gre_tnl_change(t, &p1, 1);
				ip6gre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;

			/* Return the (possibly normalized) parameters
			 * to userspace. */
			ip6gre_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			ip6gre_tnl_parm_from_user(&p1, &p);
			t = ip6gre_tunnel_locate(net, &p1, 0);
			if (t == NULL)
				goto done;
			err = -EPERM;
			/* The fallback tunnel itself may not be deleted. */
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
1252 | |
1253 | static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu) |
1254 | { |
1255 | struct ip6_tnl *tunnel = netdev_priv(dev); |
1256 | if (new_mtu < 68 || |
1257 | new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen) |
1258 | return -EINVAL; |
1259 | dev->mtu = new_mtu; |
1260 | return 0; |
1261 | } |
1262 | |
/* header_ops->create hook (installed when the remote address is
 * unspecified, see ip6gre_tunnel_init): pushes t->hlen bytes and
 * pre-fills the outer IPv6 header plus the 4-byte base GRE header.
 * Returns t->hlen when the destination is known, -t->hlen otherwise.
 * NOTE(review): t->hlen may include space for GRE csum/key/seq, but
 * only the first 4 GRE bytes are written here — presumably the xmit
 * path fills in the rest; confirm against ip6gre_xmit2().
 */
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
	__be16 *p = (__be16 *)(ipv6h+1);	/* start of the GRE header */

	/* Version 6 plus the cached tclass/flow label template. */
	*(__be32 *)ipv6h = t->fl.u.ip6.flowlabel | htonl(0x60000000);
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = NEXTHDR_GRE;
	ipv6h->saddr = t->parms.laddr;
	ipv6h->daddr = t->parms.raddr;

	/* Base GRE header: flags then protocol type. */
	p[0] = t->parms.o_flags;
	p[1] = htons(type);

	/*
	 * Set the source hardware address.
	 */

	if (saddr)
		memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
	if (daddr)
		memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
	if (!ipv6_addr_any(&ipv6h->daddr))
		return t->hlen;

	return -t->hlen;
}
1293 | |
1294 | static int ip6gre_header_parse(const struct sk_buff *skb, unsigned char *haddr) |
1295 | { |
1296 | const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb_mac_header(skb); |
1297 | memcpy(haddr, &ipv6h->saddr, sizeof(struct in6_addr)); |
1298 | return sizeof(struct in6_addr); |
1299 | } |
1300 | |
/* header_ops used when the tunnel has no fixed remote endpoint, so the
 * destination can be supplied per packet via dev_hard_header(). */
static const struct header_ops ip6gre_header_ops = {
	.create = ip6gre_header,
	.parse = ip6gre_header_parse,
};
1305 | |
/* netdev ops for plain (layer-3) ip6gre tunnel devices. */
static const struct net_device_ops ip6gre_netdev_ops = {
	.ndo_init = ip6gre_tunnel_init,
	.ndo_uninit = ip6gre_tunnel_uninit,
	.ndo_start_xmit = ip6gre_tunnel_xmit,
	.ndo_do_ioctl = ip6gre_tunnel_ioctl,
	.ndo_change_mtu = ip6gre_tunnel_change_mtu,
	.ndo_get_stats64 = ip6gre_get_stats64,
};
1314 | |
/* Device destructor: release the per-cpu stats before freeing the
 * netdev that owns the pointer. */
static void ip6gre_dev_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}
1320 | |
/* rtnl_link_ops->setup for layer-3 ip6gre devices: install ops and
 * conservative defaults before ndo_init runs.
 */
static void ip6gre_tunnel_setup(struct net_device *dev)
{
	struct ip6_tnl *t;

	dev->netdev_ops = &ip6gre_netdev_ops;
	dev->destructor = ip6gre_dev_free;

	dev->type = ARPHRD_IP6GRE;
	/* Worst-case link header + outer IPv6 header + base GRE header. */
	dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr) + 4;
	dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr) - 4;
	t = netdev_priv(dev);
	/* Reserve 8 bytes for the encapsulation limit option. */
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;
	dev->flags |= IFF_NOARP;
	dev->iflink = 0;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
1340 | |
1341 | static int ip6gre_tunnel_init(struct net_device *dev) |
1342 | { |
1343 | struct ip6_tnl *tunnel; |
1344 | |
1345 | tunnel = netdev_priv(dev); |
1346 | |
1347 | tunnel->dev = dev; |
1348 | strcpy(tunnel->parms.name, dev->name); |
1349 | |
1350 | memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr)); |
1351 | memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr)); |
1352 | |
1353 | if (ipv6_addr_any(&tunnel->parms.raddr)) |
1354 | dev->header_ops = &ip6gre_header_ops; |
1355 | |
1356 | dev->tstats = alloc_percpu(struct pcpu_tstats); |
1357 | if (!dev->tstats) |
1358 | return -ENOMEM; |
1359 | |
1360 | return 0; |
1361 | } |
1362 | |
1363 | static void ip6gre_fb_tunnel_init(struct net_device *dev) |
1364 | { |
1365 | struct ip6_tnl *tunnel = netdev_priv(dev); |
1366 | |
1367 | tunnel->dev = dev; |
1368 | strcpy(tunnel->parms.name, dev->name); |
1369 | |
1370 | tunnel->hlen = sizeof(struct ipv6hdr) + 4; |
1371 | |
1372 | dev_hold(dev); |
1373 | } |
1374 | |
1375 | |
/* IPPROTO_GRE handler registered with the IPv6 stack; delivers GRE
 * packets and ICMPv6 errors to this module. */
static struct inet6_protocol ip6gre_protocol __read_mostly = {
	.handler = ip6gre_rcv,
	.err_handler = ip6gre_err,
	.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1381 | |
1382 | static void ip6gre_destroy_tunnels(struct ip6gre_net *ign, |
1383 | struct list_head *head) |
1384 | { |
1385 | int prio; |
1386 | |
1387 | for (prio = 0; prio < 4; prio++) { |
1388 | int h; |
1389 | for (h = 0; h < HASH_SIZE; h++) { |
1390 | struct ip6_tnl *t; |
1391 | |
1392 | t = rtnl_dereference(ign->tunnels[prio][h]); |
1393 | |
1394 | while (t != NULL) { |
1395 | unregister_netdevice_queue(t->dev, head); |
1396 | t = rtnl_dereference(t->next); |
1397 | } |
1398 | } |
1399 | } |
1400 | } |
1401 | |
/* Per-namespace init: create and register the "ip6gre0" fallback
 * device and hash it into the wildcard slot.
 */
static int __net_init ip6gre_init_net(struct net *net)
{
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int err;

	ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
					  ip6gre_tunnel_setup);
	if (!ign->fb_tunnel_dev) {
		err = -ENOMEM;
		goto err_alloc_dev;
	}
	dev_net_set(ign->fb_tunnel_dev, net);

	ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
	ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;

	err = register_netdev(ign->fb_tunnel_dev);
	if (err)
		goto err_reg_dev;

	/* Fallback tunnel lives in the wildcard (no-key, any-addr) slot. */
	rcu_assign_pointer(ign->tunnels_wc[0],
			   netdev_priv(ign->fb_tunnel_dev));
	return 0;

err_reg_dev:
	/* Not yet registered, so free directly instead of unregistering. */
	ip6gre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
	return err;
}
1431 | |
1432 | static void __net_exit ip6gre_exit_net(struct net *net) |
1433 | { |
1434 | struct ip6gre_net *ign; |
1435 | LIST_HEAD(list); |
1436 | |
1437 | ign = net_generic(net, ip6gre_net_id); |
1438 | rtnl_lock(); |
1439 | ip6gre_destroy_tunnels(ign, &list); |
1440 | unregister_netdevice_many(&list); |
1441 | rtnl_unlock(); |
1442 | } |
1443 | |
/* Pernet plumbing: one struct ip6gre_net per network namespace. */
static struct pernet_operations ip6gre_net_ops = {
	.init = ip6gre_init_net,
	.exit = ip6gre_exit_net,
	.id = &ip6gre_net_id,
	.size = sizeof(struct ip6gre_net),
};
1450 | |
1451 | static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[]) |
1452 | { |
1453 | __be16 flags; |
1454 | |
1455 | if (!data) |
1456 | return 0; |
1457 | |
1458 | flags = 0; |
1459 | if (data[IFLA_GRE_IFLAGS]) |
1460 | flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]); |
1461 | if (data[IFLA_GRE_OFLAGS]) |
1462 | flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]); |
1463 | if (flags & (GRE_VERSION|GRE_ROUTING)) |
1464 | return -EINVAL; |
1465 | |
1466 | return 0; |
1467 | } |
1468 | |
1469 | static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[]) |
1470 | { |
1471 | struct in6_addr daddr; |
1472 | |
1473 | if (tb[IFLA_ADDRESS]) { |
1474 | if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) |
1475 | return -EINVAL; |
1476 | if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) |
1477 | return -EADDRNOTAVAIL; |
1478 | } |
1479 | |
1480 | if (!data) |
1481 | goto out; |
1482 | |
1483 | if (data[IFLA_GRE_REMOTE]) { |
1484 | nla_memcpy(&daddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr)); |
1485 | if (ipv6_addr_any(&daddr)) |
1486 | return -EINVAL; |
1487 | } |
1488 | |
1489 | out: |
1490 | return ip6gre_tunnel_validate(tb, data); |
1491 | } |
1492 | |
1493 | |
1494 | static void ip6gre_netlink_parms(struct nlattr *data[], |
1495 | struct __ip6_tnl_parm *parms) |
1496 | { |
1497 | memset(parms, 0, sizeof(*parms)); |
1498 | |
1499 | if (!data) |
1500 | return; |
1501 | |
1502 | if (data[IFLA_GRE_LINK]) |
1503 | parms->link = nla_get_u32(data[IFLA_GRE_LINK]); |
1504 | |
1505 | if (data[IFLA_GRE_IFLAGS]) |
1506 | parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]); |
1507 | |
1508 | if (data[IFLA_GRE_OFLAGS]) |
1509 | parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]); |
1510 | |
1511 | if (data[IFLA_GRE_IKEY]) |
1512 | parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]); |
1513 | |
1514 | if (data[IFLA_GRE_OKEY]) |
1515 | parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]); |
1516 | |
1517 | if (data[IFLA_GRE_LOCAL]) |
1518 | nla_memcpy(&parms->laddr, data[IFLA_GRE_LOCAL], sizeof(struct in6_addr)); |
1519 | |
1520 | if (data[IFLA_GRE_REMOTE]) |
1521 | nla_memcpy(&parms->raddr, data[IFLA_GRE_REMOTE], sizeof(struct in6_addr)); |
1522 | |
1523 | if (data[IFLA_GRE_TTL]) |
1524 | parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]); |
1525 | |
1526 | if (data[IFLA_GRE_ENCAP_LIMIT]) |
1527 | parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]); |
1528 | |
1529 | if (data[IFLA_GRE_FLOWINFO]) |
1530 | parms->flowinfo = nla_get_u32(data[IFLA_GRE_FLOWINFO]); |
1531 | |
1532 | if (data[IFLA_GRE_FLAGS]) |
1533 | parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]); |
1534 | } |
1535 | |
1536 | static int ip6gre_tap_init(struct net_device *dev) |
1537 | { |
1538 | struct ip6_tnl *tunnel; |
1539 | |
1540 | tunnel = netdev_priv(dev); |
1541 | |
1542 | tunnel->dev = dev; |
1543 | strcpy(tunnel->parms.name, dev->name); |
1544 | |
1545 | ip6gre_tnl_link_config(tunnel, 1); |
1546 | |
1547 | dev->tstats = alloc_percpu(struct pcpu_tstats); |
1548 | if (!dev->tstats) |
1549 | return -ENOMEM; |
1550 | |
1551 | return 0; |
1552 | } |
1553 | |
/* netdev ops for ip6gretap devices; Ethernet address handling comes
 * from the generic eth helpers, no legacy ioctl support. */
static const struct net_device_ops ip6gre_tap_netdev_ops = {
	.ndo_init = ip6gre_tap_init,
	.ndo_uninit = ip6gre_tunnel_uninit,
	.ndo_start_xmit = ip6gre_tunnel_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ip6gre_tunnel_change_mtu,
	.ndo_get_stats64 = ip6gre_get_stats64,
};
1563 | |
/* rtnl_link_ops->setup for ip6gretap: start from a generic Ethernet
 * device, then swap in the tunnel ops.
 */
static void ip6gre_tap_setup(struct net_device *dev)
{

	ether_setup(dev);

	dev->netdev_ops = &ip6gre_tap_netdev_ops;
	dev->destructor = ip6gre_dev_free;

	dev->iflink = 0;
	dev->features |= NETIF_F_NETNS_LOCAL;
}
1575 | |
/* rtnl_link_ops->newlink: create a tunnel from netlink attributes,
 * register the device and hash it into the per-netns tables.
 */
static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
	struct nlattr *tb[], struct nlattr *data[])
{
	struct ip6_tnl *nt;
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	int err;

	nt = netdev_priv(dev);
	ip6gre_netlink_parms(data, &nt->parms);

	/* Refuse a duplicate of an existing tunnel of the same kind. */
	if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);

	nt->dev = dev;
	/* Only honor the route-derived MTU if the user gave none. */
	ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	ip6gre_tunnel_link(ign, nt);

out:
	return err;
}
1610 | |
/* rtnl_link_ops->changelink: apply new netlink parameters to an
 * existing tunnel, re-hashing it under the new key/addresses.  The
 * fallback device cannot be reconfigured.
 */
static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[])
{
	struct ip6_tnl *t, *nt;
	struct net *net = dev_net(dev);
	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
	struct __ip6_tnl_parm p;

	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ip6gre_netlink_parms(data, &p);

	t = ip6gre_tunnel_locate(net, &p, 0);

	if (t) {
		/* New parameters collide with a different tunnel. */
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		ip6gre_tunnel_unlink(ign, t);
		ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
		ip6gre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	return 0;
}
1641 | |
1642 | static size_t ip6gre_get_size(const struct net_device *dev) |
1643 | { |
1644 | return |
1645 | /* IFLA_GRE_LINK */ |
1646 | nla_total_size(4) + |
1647 | /* IFLA_GRE_IFLAGS */ |
1648 | nla_total_size(2) + |
1649 | /* IFLA_GRE_OFLAGS */ |
1650 | nla_total_size(2) + |
1651 | /* IFLA_GRE_IKEY */ |
1652 | nla_total_size(4) + |
1653 | /* IFLA_GRE_OKEY */ |
1654 | nla_total_size(4) + |
1655 | /* IFLA_GRE_LOCAL */ |
1656 | nla_total_size(4) + |
1657 | /* IFLA_GRE_REMOTE */ |
1658 | nla_total_size(4) + |
1659 | /* IFLA_GRE_TTL */ |
1660 | nla_total_size(1) + |
1661 | /* IFLA_GRE_TOS */ |
1662 | nla_total_size(1) + |
1663 | /* IFLA_GRE_ENCAP_LIMIT */ |
1664 | nla_total_size(1) + |
1665 | /* IFLA_GRE_FLOWINFO */ |
1666 | nla_total_size(4) + |
1667 | /* IFLA_GRE_FLAGS */ |
1668 | nla_total_size(4) + |
1669 | 0; |
1670 | } |
1671 | |
1672 | static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) |
1673 | { |
1674 | struct ip6_tnl *t = netdev_priv(dev); |
1675 | struct __ip6_tnl_parm *p = &t->parms; |
1676 | |
1677 | if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || |
1678 | nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) || |
1679 | nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) || |
1680 | nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || |
1681 | nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || |
1682 | nla_put(skb, IFLA_GRE_LOCAL, sizeof(struct in6_addr), &p->raddr) || |
1683 | nla_put(skb, IFLA_GRE_REMOTE, sizeof(struct in6_addr), &p->laddr) || |
1684 | nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) || |
1685 | /*nla_put_u8(skb, IFLA_GRE_TOS, t->priority) ||*/ |
1686 | nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) || |
1687 | nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) || |
1688 | nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags)) |
1689 | goto nla_put_failure; |
1690 | return 0; |
1691 | |
1692 | nla_put_failure: |
1693 | return -EMSGSIZE; |
1694 | } |
1695 | |
/* Netlink attribute policy shared by ip6gre and ip6gretap. */
static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK] = { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
	[IFLA_GRE_IKEY] = { .type = NLA_U32 },
	[IFLA_GRE_OKEY] = { .type = NLA_U32 },
	[IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) },
	[IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) },
	[IFLA_GRE_TTL] = { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
	[IFLA_GRE_FLOWINFO] = { .type = NLA_U32 },
	[IFLA_GRE_FLAGS] = { .type = NLA_U32 },
};
1709 | |
/* rtnetlink ops for layer-3 "ip6gre" links. */
static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
	.kind = "ip6gre",
	.maxtype = IFLA_GRE_MAX,
	.policy = ip6gre_policy,
	.priv_size = sizeof(struct ip6_tnl),
	.setup = ip6gre_tunnel_setup,
	.validate = ip6gre_tunnel_validate,
	.newlink = ip6gre_newlink,
	.changelink = ip6gre_changelink,
	.get_size = ip6gre_get_size,
	.fill_info = ip6gre_fill_info,
};
1722 | |
/* rtnetlink ops for Ethernet-over-GRE "ip6gretap" links. */
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
	.kind = "ip6gretap",
	.maxtype = IFLA_GRE_MAX,
	.policy = ip6gre_policy,
	.priv_size = sizeof(struct ip6_tnl),
	.setup = ip6gre_tap_setup,
	.validate = ip6gre_tap_validate,
	.newlink = ip6gre_newlink,
	.changelink = ip6gre_changelink,
	.get_size = ip6gre_get_size,
	.fill_info = ip6gre_fill_info,
};
1735 | |
1736 | /* |
1737 | * And now the modules code and kernel interface. |
1738 | */ |
1739 | |
/* Module init: register pernet state, the IPPROTO_GRE handler and
 * both rtnetlink link kinds.  Each failure path unwinds everything
 * registered so far, in reverse order.
 */
static int __init ip6gre_init(void)
{
	int err;

	pr_info("GRE over IPv6 tunneling driver\n");

	err = register_pernet_device(&ip6gre_net_ops);
	if (err < 0)
		return err;

	err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ip6gre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ip6gre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

out:
	return err;

tap_ops_failed:
	rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
	unregister_pernet_device(&ip6gre_net_ops);
	goto out;
}
1775 | |
/* Module exit: tear down in the exact reverse order of ip6gre_init(). */
static void __exit ip6gre_fini(void)
{
	rtnl_link_unregister(&ip6gre_tap_ops);
	rtnl_link_unregister(&ip6gre_link_ops);
	inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
	unregister_pernet_device(&ip6gre_net_ops);
}
1783 | |
/* Module entry points and metadata. */
module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_NETDEV("ip6gre0");