1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14 /* Changes:
15 *
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
23 * Ville Nuorvala
24 * Fixed routing subtrees.
25 */
26
27 #define pr_fmt(fmt) "IPv6: " fmt
28
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
48 #include <net/snmp.h>
49 #include <net/ipv6.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
54 #include <net/tcp.h>
55 #include <linux/rtnetlink.h>
56 #include <net/dst.h>
57 #include <net/xfrm.h>
58 #include <net/netevent.h>
59 #include <net/netlink.h>
60 #include <net/nexthop.h>
61
62 #include <asm/uaccess.h>
63
64 #ifdef CONFIG_SYSCTL
65 #include <linux/sysctl.h>
66 #endif
67
68 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
69 const struct in6_addr *dest);
70 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
71 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
72 static unsigned int ip6_mtu(const struct dst_entry *dst);
73 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
74 static void ip6_dst_destroy(struct dst_entry *);
75 static void ip6_dst_ifdown(struct dst_entry *,
76 struct net_device *dev, int how);
77 static int ip6_dst_gc(struct dst_ops *ops);
78
79 static int ip6_pkt_discard(struct sk_buff *skb);
80 static int ip6_pkt_discard_out(struct sk_buff *skb);
81 static void ip6_link_failure(struct sk_buff *skb);
82 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
83 struct sk_buff *skb, u32 mtu);
84 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
85 struct sk_buff *skb);
86 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
87
88 #ifdef CONFIG_IPV6_ROUTE_INFO
89 static struct rt6_info *rt6_add_route_info(struct net *net,
90 const struct in6_addr *prefix, int prefixlen,
91 const struct in6_addr *gwaddr, int ifindex,
92 unsigned int pref);
93 static struct rt6_info *rt6_get_route_info(struct net *net,
94 const struct in6_addr *prefix, int prefixlen,
95 const struct in6_addr *gwaddr, int ifindex);
96 #endif
97
98 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
99 {
100 struct rt6_info *rt = (struct rt6_info *) dst;
101 struct inet_peer *peer;
102 u32 *p = NULL;
103
104 if (!(rt->dst.flags & DST_HOST))
105 return NULL;
106
107 peer = rt6_get_peer_create(rt);
108 if (peer) {
109 u32 *old_p = __DST_METRICS_PTR(old);
110 unsigned long prev, new;
111
112 p = peer->metrics;
113 if (inet_metrics_new(peer))
114 memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
115
116 new = (unsigned long) p;
117 prev = cmpxchg(&dst->_metrics, old, new);
118
119 if (prev != old) {
120 p = __DST_METRICS_PTR(prev);
121 if (prev & DST_METRICS_READ_ONLY)
122 p = NULL;
123 }
124 }
125 return p;
126 }
127
128 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
129 struct sk_buff *skb,
130 const void *daddr)
131 {
132 struct in6_addr *p = &rt->rt6i_gateway;
133
134 if (!ipv6_addr_any(p))
135 return (const void *) p;
136 else if (skb)
137 return &ipv6_hdr(skb)->daddr;
138 return daddr;
139 }
140
141 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
142 struct sk_buff *skb,
143 const void *daddr)
144 {
145 struct rt6_info *rt = (struct rt6_info *) dst;
146 struct neighbour *n;
147
148 daddr = choose_neigh_daddr(rt, skb, daddr);
149 n = __ipv6_neigh_lookup(dst->dev, daddr);
150 if (n)
151 return n;
152 return neigh_create(&nd_tbl, daddr, dst->dev);
153 }
154
155 static struct dst_ops ip6_dst_ops_template = {
156 .family = AF_INET6,
157 .protocol = cpu_to_be16(ETH_P_IPV6),
158 .gc = ip6_dst_gc,
159 .gc_thresh = 1024,
160 .check = ip6_dst_check,
161 .default_advmss = ip6_default_advmss,
162 .mtu = ip6_mtu,
163 .cow_metrics = ipv6_cow_metrics,
164 .destroy = ip6_dst_destroy,
165 .ifdown = ip6_dst_ifdown,
166 .negative_advice = ip6_negative_advice,
167 .link_failure = ip6_link_failure,
168 .update_pmtu = ip6_rt_update_pmtu,
169 .redirect = rt6_do_redirect,
170 .local_out = __ip6_local_out,
171 .neigh_lookup = ip6_neigh_lookup,
172 };
173
174 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
175 {
176 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
177
178 return mtu ? : dst->dev->mtu;
179 }
180
181 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
182 struct sk_buff *skb, u32 mtu)
183 {
184 }
185
186 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
187 struct sk_buff *skb)
188 {
189 }
190
191 static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
192 unsigned long old)
193 {
194 return NULL;
195 }
196
197 static struct dst_ops ip6_dst_blackhole_ops = {
198 .family = AF_INET6,
199 .protocol = cpu_to_be16(ETH_P_IPV6),
200 .destroy = ip6_dst_destroy,
201 .check = ip6_dst_check,
202 .mtu = ip6_blackhole_mtu,
203 .default_advmss = ip6_default_advmss,
204 .update_pmtu = ip6_rt_blackhole_update_pmtu,
205 .redirect = ip6_rt_blackhole_redirect,
206 .cow_metrics = ip6_rt_blackhole_cow_metrics,
207 .neigh_lookup = ip6_neigh_lookup,
208 };
209
210 static const u32 ip6_template_metrics[RTAX_MAX] = {
211 [RTAX_HOPLIMIT - 1] = 0,
212 };
213
214 static const struct rt6_info ip6_null_entry_template = {
215 .dst = {
216 .__refcnt = ATOMIC_INIT(1),
217 .__use = 1,
218 .obsolete = DST_OBSOLETE_FORCE_CHK,
219 .error = -ENETUNREACH,
220 .input = ip6_pkt_discard,
221 .output = ip6_pkt_discard_out,
222 },
223 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
224 .rt6i_protocol = RTPROT_KERNEL,
225 .rt6i_metric = ~(u32) 0,
226 .rt6i_ref = ATOMIC_INIT(1),
227 };
228
229 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
230
231 static int ip6_pkt_prohibit(struct sk_buff *skb);
232 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
233
234 static const struct rt6_info ip6_prohibit_entry_template = {
235 .dst = {
236 .__refcnt = ATOMIC_INIT(1),
237 .__use = 1,
238 .obsolete = DST_OBSOLETE_FORCE_CHK,
239 .error = -EACCES,
240 .input = ip6_pkt_prohibit,
241 .output = ip6_pkt_prohibit_out,
242 },
243 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
244 .rt6i_protocol = RTPROT_KERNEL,
245 .rt6i_metric = ~(u32) 0,
246 .rt6i_ref = ATOMIC_INIT(1),
247 };
248
249 static const struct rt6_info ip6_blk_hole_entry_template = {
250 .dst = {
251 .__refcnt = ATOMIC_INIT(1),
252 .__use = 1,
253 .obsolete = DST_OBSOLETE_FORCE_CHK,
254 .error = -EINVAL,
255 .input = dst_discard,
256 .output = dst_discard,
257 },
258 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
259 .rt6i_protocol = RTPROT_KERNEL,
260 .rt6i_metric = ~(u32) 0,
261 .rt6i_ref = ATOMIC_INIT(1),
262 };
263
264 #endif
265
266 /* allocate dst with ip6_dst_ops */
267 static inline struct rt6_info *ip6_dst_alloc(struct net *net,
268 struct net_device *dev,
269 int flags,
270 struct fib6_table *table)
271 {
272 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
273 0, DST_OBSOLETE_FORCE_CHK, flags);
274
275 if (rt) {
276 struct dst_entry *dst = &rt->dst;
277
278 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
279 rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
280 rt->rt6i_genid = rt_genid(net);
281 INIT_LIST_HEAD(&rt->rt6i_siblings);
282 rt->rt6i_nsiblings = 0;
283 }
284 return rt;
285 }
286
287 static void ip6_dst_destroy(struct dst_entry *dst)
288 {
289 struct rt6_info *rt = (struct rt6_info *)dst;
290 struct inet6_dev *idev = rt->rt6i_idev;
291 struct dst_entry *from = dst->from;
292
293 if (!(rt->dst.flags & DST_HOST))
294 dst_destroy_metrics_generic(dst);
295
296 if (idev) {
297 rt->rt6i_idev = NULL;
298 in6_dev_put(idev);
299 }
300
301 dst->from = NULL;
302 dst_release(from);
303
304 if (rt6_has_peer(rt)) {
305 struct inet_peer *peer = rt6_peer_ptr(rt);
306 inet_putpeer(peer);
307 }
308 }
309
310 void rt6_bind_peer(struct rt6_info *rt, int create)
311 {
312 struct inet_peer_base *base;
313 struct inet_peer *peer;
314
315 base = inetpeer_base_ptr(rt->_rt6i_peer);
316 if (!base)
317 return;
318
319 peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
320 if (peer) {
321 if (!rt6_set_peer(rt, peer))
322 inet_putpeer(peer);
323 }
324 }
325
326 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
327 int how)
328 {
329 struct rt6_info *rt = (struct rt6_info *)dst;
330 struct inet6_dev *idev = rt->rt6i_idev;
331 struct net_device *loopback_dev =
332 dev_net(dev)->loopback_dev;
333
334 if (dev != loopback_dev) {
335 if (idev && idev->dev == dev) {
336 struct inet6_dev *loopback_idev =
337 in6_dev_get(loopback_dev);
338 if (loopback_idev) {
339 rt->rt6i_idev = loopback_idev;
340 in6_dev_put(idev);
341 }
342 }
343 }
344 }
345
346 static bool rt6_check_expired(const struct rt6_info *rt)
347 {
348 if (rt->rt6i_flags & RTF_EXPIRES) {
349 if (time_after(jiffies, rt->dst.expires))
350 return true;
351 } else if (rt->dst.from) {
352 return rt6_check_expired((struct rt6_info *) rt->dst.from);
353 }
354 return false;
355 }
356
357 static bool rt6_need_strict(const struct in6_addr *daddr)
358 {
359 return ipv6_addr_type(daddr) &
360 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
361 }
362
363 /* Multipath route selection:
364 * Hash based function using packet header and flowlabel.
365 * Adapted from fib_info_hashfn()
366 */
367 static int rt6_info_hash_nhsfn(unsigned int candidate_count,
368 const struct flowi6 *fl6)
369 {
370 unsigned int val = fl6->flowi6_proto;
371
372 val ^= ipv6_addr_hash(&fl6->daddr);
373 val ^= ipv6_addr_hash(&fl6->saddr);
374
375 /* Works only if this is not encapsulated */
376 switch (fl6->flowi6_proto) {
377 case IPPROTO_UDP:
378 case IPPROTO_TCP:
379 case IPPROTO_SCTP:
380 val ^= (__force u16)fl6->fl6_sport;
381 val ^= (__force u16)fl6->fl6_dport;
382 break;
383
384 case IPPROTO_ICMPV6:
385 val ^= (__force u16)fl6->fl6_icmp_type;
386 val ^= (__force u16)fl6->fl6_icmp_code;
387 break;
388 }
389 /* RFC 6438 recommends using the flow label */
390 val ^= (__force u32)fl6->flowlabel;
391
392 /* Perhaps we need to tune this function? */
393 val = val ^ (val >> 7) ^ (val >> 12);
394 return val % candidate_count;
395 }
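/*
 * A minimal sketch of the selection step implemented above, assuming the
 * 32-bit flow hash `val` has already been built from the headers; the
 * helper name is illustrative only:
 *
 *	static unsigned int pick_nexthop_index(unsigned int val,
 *					       unsigned int candidate_count)
 *	{
 *		// fold the high bits down, then reduce to an index;
 *		// index 0 keeps the originally matched route
 *		val = val ^ (val >> 7) ^ (val >> 12);
 *		return val % candidate_count;
 *	}
 */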
396
397 static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
398 struct flowi6 *fl6, int oif,
399 int strict)
400 {
401 struct rt6_info *sibling, *next_sibling;
402 int route_choosen;
403
404 route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
405 /* Don't change the route if route_choosen == 0
406 * (the siblings list does not include this route itself)
407 */
408 if (route_choosen)
409 list_for_each_entry_safe(sibling, next_sibling,
410 &match->rt6i_siblings, rt6i_siblings) {
411 route_choosen--;
412 if (route_choosen == 0) {
413 if (rt6_score_route(sibling, oif, strict) < 0)
414 break;
415 match = sibling;
416 break;
417 }
418 }
419 return match;
420 }
421
422 /*
423 * Route lookup. Any table->tb6_lock is implied.
424 */
425
426 static inline struct rt6_info *rt6_device_match(struct net *net,
427 struct rt6_info *rt,
428 const struct in6_addr *saddr,
429 int oif,
430 int flags)
431 {
432 struct rt6_info *local = NULL;
433 struct rt6_info *sprt;
434
435 if (!oif && ipv6_addr_any(saddr))
436 goto out;
437
438 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
439 struct net_device *dev = sprt->dst.dev;
440
441 if (oif) {
442 if (dev->ifindex == oif)
443 return sprt;
444 if (dev->flags & IFF_LOOPBACK) {
445 if (!sprt->rt6i_idev ||
446 sprt->rt6i_idev->dev->ifindex != oif) {
447 if (flags & RT6_LOOKUP_F_IFACE && oif)
448 continue;
449 if (local && (!oif ||
450 local->rt6i_idev->dev->ifindex == oif))
451 continue;
452 }
453 local = sprt;
454 }
455 } else {
456 if (ipv6_chk_addr(net, saddr, dev,
457 flags & RT6_LOOKUP_F_IFACE))
458 return sprt;
459 }
460 }
461
462 if (oif) {
463 if (local)
464 return local;
465
466 if (flags & RT6_LOOKUP_F_IFACE)
467 return net->ipv6.ip6_null_entry;
468 }
469 out:
470 return rt;
471 }
472
473 #ifdef CONFIG_IPV6_ROUTER_PREF
474 static void rt6_probe(struct rt6_info *rt)
475 {
476 struct neighbour *neigh;
477 /*
478 * Okay, this does not seem to be appropriate
479 * for now, however, we need to check if it
480 * is really so; aka Router Reachability Probing.
481 *
482 * Router Reachability Probe MUST be rate-limited
483 * to no more than one per minute.
484 */
485 if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
486 return;
487 rcu_read_lock_bh();
488 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
489 if (neigh) {
490 write_lock(&neigh->lock);
491 if (neigh->nud_state & NUD_VALID)
492 goto out;
493 }
494
495 if (!neigh ||
496 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
497 struct in6_addr mcaddr;
498 struct in6_addr *target;
499
500 if (neigh) {
501 neigh->updated = jiffies;
502 write_unlock(&neigh->lock);
503 }
504
505 target = (struct in6_addr *)&rt->rt6i_gateway;
506 addrconf_addr_solict_mult(target, &mcaddr);
507 ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
508 } else {
509 out:
510 write_unlock(&neigh->lock);
511 }
512 rcu_read_unlock_bh();
513 }
514 #else
515 static inline void rt6_probe(struct rt6_info *rt)
516 {
517 }
518 #endif
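/*
 * For reference, the solicited-node multicast address that the probe above
 * is sent to keeps only the low 24 bits of the gateway address (RFC 4291,
 * ff02::1:ffXX:XXXX). A sketch of that mapping, mirroring what
 * addrconf_addr_solict_mult() does; the helper name here is illustrative:
 *
 *	static void solicited_node_mcast(const struct in6_addr *target,
 *					 struct in6_addr *mcaddr)
 *	{
 *		// ff02::1:ffXX:XXXX, where XX:XXXX = low 24 bits of target
 *		ipv6_addr_set(mcaddr, htonl(0xFF020000), 0, htonl(0x1),
 *			      htonl(0xFF000000) | target->s6_addr32[3]);
 *	}
 */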
519
520 /*
521 * Default Router Selection (RFC 2461 6.3.6)
522 */
523 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
524 {
525 struct net_device *dev = rt->dst.dev;
526 if (!oif || dev->ifindex == oif)
527 return 2;
528 if ((dev->flags & IFF_LOOPBACK) &&
529 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
530 return 1;
531 return 0;
532 }
533
534 static inline bool rt6_check_neigh(struct rt6_info *rt)
535 {
536 struct neighbour *neigh;
537 bool ret = false;
538
539 if (rt->rt6i_flags & RTF_NONEXTHOP ||
540 !(rt->rt6i_flags & RTF_GATEWAY))
541 return true;
542
543 rcu_read_lock_bh();
544 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
545 if (neigh) {
546 read_lock(&neigh->lock);
547 if (neigh->nud_state & NUD_VALID)
548 ret = true;
549 #ifdef CONFIG_IPV6_ROUTER_PREF
550 else if (!(neigh->nud_state & NUD_FAILED))
551 ret = true;
552 #endif
553 read_unlock(&neigh->lock);
554 } else if (IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
555 ret = true;
556 }
557 rcu_read_unlock_bh();
558
559 return ret;
560 }
561
562 static int rt6_score_route(struct rt6_info *rt, int oif,
563 int strict)
564 {
565 int m;
566
567 m = rt6_check_dev(rt, oif);
568 if (!m && (strict & RT6_LOOKUP_F_IFACE))
569 return -1;
570 #ifdef CONFIG_IPV6_ROUTER_PREF
571 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
572 #endif
573 if (!rt6_check_neigh(rt) && (strict & RT6_LOOKUP_F_REACHABLE))
574 return -1;
575 return m;
576 }
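/*
 * Worked example of the score computed above: a route whose device matches
 * the requested oif contributes 2 from rt6_check_dev(), and with
 * CONFIG_IPV6_ROUTER_PREF the decoded router preference p is folded in as
 * p << 2, so
 *
 *	m = 2 | (p << 2);	// e.g. p = 3 -> m = 14, p = 2 -> m = 10
 *
 * Independently of m, if RT6_LOOKUP_F_REACHABLE is requested and
 * rt6_check_neigh() says the nexthop is not (probably) reachable, the
 * route is discarded with -1.
 */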
577
578 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
579 int *mpri, struct rt6_info *match)
580 {
581 int m;
582
583 if (rt6_check_expired(rt))
584 goto out;
585
586 m = rt6_score_route(rt, oif, strict);
587 if (m < 0)
588 goto out;
589
590 if (m > *mpri) {
591 if (strict & RT6_LOOKUP_F_REACHABLE)
592 rt6_probe(match);
593 *mpri = m;
594 match = rt;
595 } else if (strict & RT6_LOOKUP_F_REACHABLE) {
596 rt6_probe(rt);
597 }
598
599 out:
600 return match;
601 }
602
603 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
604 struct rt6_info *rr_head,
605 u32 metric, int oif, int strict)
606 {
607 struct rt6_info *rt, *match;
608 int mpri = -1;
609
610 match = NULL;
611 for (rt = rr_head; rt && rt->rt6i_metric == metric;
612 rt = rt->dst.rt6_next)
613 match = find_match(rt, oif, strict, &mpri, match);
614 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
615 rt = rt->dst.rt6_next)
616 match = find_match(rt, oif, strict, &mpri, match);
617
618 return match;
619 }
620
621 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
622 {
623 struct rt6_info *match, *rt0;
624 struct net *net;
625
626 rt0 = fn->rr_ptr;
627 if (!rt0)
628 fn->rr_ptr = rt0 = fn->leaf;
629
630 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
631
632 if (!match &&
633 (strict & RT6_LOOKUP_F_REACHABLE)) {
634 struct rt6_info *next = rt0->dst.rt6_next;
635
636 /* no entries matched; do round-robin */
637 if (!next || next->rt6i_metric != rt0->rt6i_metric)
638 next = fn->leaf;
639
640 if (next != rt0)
641 fn->rr_ptr = next;
642 }
643
644 net = dev_net(rt0->dst.dev);
645 return match ? match : net->ipv6.ip6_null_entry;
646 }
647
648 #ifdef CONFIG_IPV6_ROUTE_INFO
649 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
650 const struct in6_addr *gwaddr)
651 {
652 struct net *net = dev_net(dev);
653 struct route_info *rinfo = (struct route_info *) opt;
654 struct in6_addr prefix_buf, *prefix;
655 unsigned int pref;
656 unsigned long lifetime;
657 struct rt6_info *rt;
658
659 if (len < sizeof(struct route_info)) {
660 return -EINVAL;
661 }
662
663 /* Sanity check for prefix_len and length */
664 if (rinfo->length > 3) {
665 return -EINVAL;
666 } else if (rinfo->prefix_len > 128) {
667 return -EINVAL;
668 } else if (rinfo->prefix_len > 64) {
669 if (rinfo->length < 2) {
670 return -EINVAL;
671 }
672 } else if (rinfo->prefix_len > 0) {
673 if (rinfo->length < 1) {
674 return -EINVAL;
675 }
676 }
677
678 pref = rinfo->route_pref;
679 if (pref == ICMPV6_ROUTER_PREF_INVALID)
680 return -EINVAL;
681
682 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
683
684 if (rinfo->length == 3)
685 prefix = (struct in6_addr *)rinfo->prefix;
686 else {
687 /* this function is safe */
688 ipv6_addr_prefix(&prefix_buf,
689 (struct in6_addr *)rinfo->prefix,
690 rinfo->prefix_len);
691 prefix = &prefix_buf;
692 }
693
694 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
695 dev->ifindex);
696
697 if (rt && !lifetime) {
698 ip6_del_rt(rt);
699 rt = NULL;
700 }
701
702 if (!rt && lifetime)
703 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
704 pref);
705 else if (rt)
706 rt->rt6i_flags = RTF_ROUTEINFO |
707 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
708
709 if (rt) {
710 if (!addrconf_finite_timeout(lifetime))
711 rt6_clean_expires(rt);
712 else
713 rt6_set_expires(rt, jiffies + HZ * lifetime);
714
715 ip6_rt_put(rt);
716 }
717 return 0;
718 }
719 #endif
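/*
 * The length/prefix_len checks in rt6_route_rcv() above follow the
 * RFC 4191 option sizing, where the length field is in units of 8 octets:
 *
 *	length 1  ->  0 prefix octets
 *	length 2  ->  8 prefix octets
 *	length 3  -> 16 prefix octets (prefix used verbatim)
 *
 * For shorter options, ipv6_addr_prefix() copies the first prefix_len bits
 * and zeroes the rest before the prefix is looked up or added.
 */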
720
721 #define BACKTRACK(__net, saddr) \
722 do { \
723 if (rt == __net->ipv6.ip6_null_entry) { \
724 struct fib6_node *pn; \
725 while (1) { \
726 if (fn->fn_flags & RTN_TL_ROOT) \
727 goto out; \
728 pn = fn->parent; \
729 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
730 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
731 else \
732 fn = pn; \
733 if (fn->fn_flags & RTN_RTINFO) \
734 goto restart; \
735 } \
736 } \
737 } while (0)
738
739 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
740 struct fib6_table *table,
741 struct flowi6 *fl6, int flags)
742 {
743 struct fib6_node *fn;
744 struct rt6_info *rt;
745
746 read_lock_bh(&table->tb6_lock);
747 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
748 restart:
749 rt = fn->leaf;
750 rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
751 if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
752 rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
753 BACKTRACK(net, &fl6->saddr);
754 out:
755 dst_use(&rt->dst, jiffies);
756 read_unlock_bh(&table->tb6_lock);
757 return rt;
758
759 }
760
761 struct dst_entry * ip6_route_lookup(struct net *net, struct flowi6 *fl6,
762 int flags)
763 {
764 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
765 }
766 EXPORT_SYMBOL_GPL(ip6_route_lookup);
767
768 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
769 const struct in6_addr *saddr, int oif, int strict)
770 {
771 struct flowi6 fl6 = {
772 .flowi6_oif = oif,
773 .daddr = *daddr,
774 };
775 struct dst_entry *dst;
776 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
777
778 if (saddr) {
779 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
780 flags |= RT6_LOOKUP_F_HAS_SADDR;
781 }
782
783 dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
784 if (dst->error == 0)
785 return (struct rt6_info *) dst;
786
787 dst_release(dst);
788
789 return NULL;
790 }
791
792 EXPORT_SYMBOL(rt6_lookup);
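/*
 * Typical in-kernel usage sketch for rt6_lookup() (the lookup takes a
 * reference on the returned route, so the caller must drop it; the
 * variable names are illustrative):
 *
 *	struct rt6_info *rt;
 *
 *	rt = rt6_lookup(net, &daddr, NULL, skb->dev->ifindex, 0);
 *	if (rt) {
 *		// inspect rt->dst.dev, rt->rt6i_gateway, rt->rt6i_flags, ...
 *		ip6_rt_put(rt);
 *	}
 */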
793
794 /* ip6_ins_rt is called with table->tb6_lock NOT held (free).
795 It takes a new route entry; if the addition fails for any reason, the
796 route is freed. In any case, if the caller does not hold a reference,
797 it may be destroyed.
798 */
799
800 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
801 {
802 int err;
803 struct fib6_table *table;
804
805 table = rt->rt6i_table;
806 write_lock_bh(&table->tb6_lock);
807 err = fib6_add(&table->tb6_root, rt, info);
808 write_unlock_bh(&table->tb6_lock);
809
810 return err;
811 }
812
813 int ip6_ins_rt(struct rt6_info *rt)
814 {
815 struct nl_info info = {
816 .nl_net = dev_net(rt->dst.dev),
817 };
818 return __ip6_ins_rt(rt, &info);
819 }
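/*
 * A minimal sketch of the insertion contract described above (the caller
 * built rt, e.g. with ip6_dst_alloc(), and does not hold tb6_lock):
 *
 *	err = ip6_ins_rt(rt);
 *	if (err)
 *		return err;	// rt was already freed on failure
 *	// on success the FIB tree now holds its own reference to rt
 */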
820
821 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
822 const struct in6_addr *daddr,
823 const struct in6_addr *saddr)
824 {
825 struct rt6_info *rt;
826
827 /*
828 * Clone the route.
829 */
830
831 rt = ip6_rt_copy(ort, daddr);
832
833 if (rt) {
834 if (!(rt->rt6i_flags & RTF_GATEWAY)) {
835 if (ort->rt6i_dst.plen != 128 &&
836 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
837 rt->rt6i_flags |= RTF_ANYCAST;
838 rt->rt6i_gateway = *daddr;
839 }
840
841 rt->rt6i_flags |= RTF_CACHE;
842
843 #ifdef CONFIG_IPV6_SUBTREES
844 if (rt->rt6i_src.plen && saddr) {
845 rt->rt6i_src.addr = *saddr;
846 rt->rt6i_src.plen = 128;
847 }
848 #endif
849 }
850
851 return rt;
852 }
853
854 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
855 const struct in6_addr *daddr)
856 {
857 struct rt6_info *rt = ip6_rt_copy(ort, daddr);
858
859 if (rt)
860 rt->rt6i_flags |= RTF_CACHE;
861 return rt;
862 }
863
864 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
865 struct flowi6 *fl6, int flags)
866 {
867 struct fib6_node *fn;
868 struct rt6_info *rt, *nrt;
869 int strict = 0;
870 int attempts = 3;
871 int err;
872 int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
873
874 strict |= flags & RT6_LOOKUP_F_IFACE;
875
876 relookup:
877 read_lock_bh(&table->tb6_lock);
878
879 restart_2:
880 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
881
882 restart:
883 rt = rt6_select(fn, oif, strict | reachable);
884 if (rt->rt6i_nsiblings)
885 rt = rt6_multipath_select(rt, fl6, oif, strict | reachable);
886 BACKTRACK(net, &fl6->saddr);
887 if (rt == net->ipv6.ip6_null_entry ||
888 rt->rt6i_flags & RTF_CACHE)
889 goto out;
890
891 dst_hold(&rt->dst);
892 read_unlock_bh(&table->tb6_lock);
893
894 if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
895 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
896 else if (!(rt->dst.flags & DST_HOST))
897 nrt = rt6_alloc_clone(rt, &fl6->daddr);
898 else
899 goto out2;
900
901 ip6_rt_put(rt);
902 rt = nrt ? : net->ipv6.ip6_null_entry;
903
904 dst_hold(&rt->dst);
905 if (nrt) {
906 err = ip6_ins_rt(nrt);
907 if (!err)
908 goto out2;
909 }
910
911 if (--attempts <= 0)
912 goto out2;
913
914 /*
915 * Race condition! In the gap when table->tb6_lock was
916 * released, someone could have inserted this route. Relookup.
917 */
918 ip6_rt_put(rt);
919 goto relookup;
920
921 out:
922 if (reachable) {
923 reachable = 0;
924 goto restart_2;
925 }
926 dst_hold(&rt->dst);
927 read_unlock_bh(&table->tb6_lock);
928 out2:
929 rt->dst.lastuse = jiffies;
930 rt->dst.__use++;
931
932 return rt;
933 }
934
935 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
936 struct flowi6 *fl6, int flags)
937 {
938 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
939 }
940
941 static struct dst_entry *ip6_route_input_lookup(struct net *net,
942 struct net_device *dev,
943 struct flowi6 *fl6, int flags)
944 {
945 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
946 flags |= RT6_LOOKUP_F_IFACE;
947
948 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
949 }
950
951 void ip6_route_input(struct sk_buff *skb)
952 {
953 const struct ipv6hdr *iph = ipv6_hdr(skb);
954 struct net *net = dev_net(skb->dev);
955 int flags = RT6_LOOKUP_F_HAS_SADDR;
956 struct flowi6 fl6 = {
957 .flowi6_iif = skb->dev->ifindex,
958 .daddr = iph->daddr,
959 .saddr = iph->saddr,
960 .flowlabel = ip6_flowinfo(iph),
961 .flowi6_mark = skb->mark,
962 .flowi6_proto = iph->nexthdr,
963 };
964
965 skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
966 }
967
968 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
969 struct flowi6 *fl6, int flags)
970 {
971 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
972 }
973
974 struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
975 struct flowi6 *fl6)
976 {
977 int flags = 0;
978
979 fl6->flowi6_iif = LOOPBACK_IFINDEX;
980
981 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
982 flags |= RT6_LOOKUP_F_IFACE;
983
984 if (!ipv6_addr_any(&fl6->saddr))
985 flags |= RT6_LOOKUP_F_HAS_SADDR;
986 else if (sk)
987 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
988
989 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
990 }
991
992 EXPORT_SYMBOL(ip6_route_output);
993
994 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
995 {
996 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
997 struct dst_entry *new = NULL;
998
999 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
1000 if (rt) {
1001 new = &rt->dst;
1002
1003 memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
1004 rt6_init_peer(rt, net->ipv6.peers);
1005
1006 new->__use = 1;
1007 new->input = dst_discard;
1008 new->output = dst_discard;
1009
1010 if (dst_metrics_read_only(&ort->dst))
1011 new->_metrics = ort->dst._metrics;
1012 else
1013 dst_copy_metrics(new, &ort->dst);
1014 rt->rt6i_idev = ort->rt6i_idev;
1015 if (rt->rt6i_idev)
1016 in6_dev_hold(rt->rt6i_idev);
1017
1018 rt->rt6i_gateway = ort->rt6i_gateway;
1019 rt->rt6i_flags = ort->rt6i_flags;
1020 rt->rt6i_metric = 0;
1021
1022 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1023 #ifdef CONFIG_IPV6_SUBTREES
1024 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1025 #endif
1026
1027 dst_free(new);
1028 }
1029
1030 dst_release(dst_orig);
1031 return new ? new : ERR_PTR(-ENOMEM);
1032 }
1033
1034 /*
1035 * Destination cache support functions
1036 */
1037
1038 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
1039 {
1040 struct rt6_info *rt;
1041
1042 rt = (struct rt6_info *) dst;
1043
1044 /* All IPv6 dsts are created with ->obsolete set to the value
1045 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1046 * into this function always.
1047 */
1048 if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev)))
1049 return NULL;
1050
1051 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
1052 return dst;
1053
1054 return NULL;
1055 }
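/*
 * Callers revalidate a cached dst through dst->ops->check() with the
 * fn_sernum they saved as a cookie; a sketch of the usual socket-side
 * pattern (np being the socket's ipv6_pinfo):
 *
 *	dst = __sk_dst_check(sk, np->dst_cookie);
 *	if (!dst)
 *		// the tree changed under us: redo ip6_route_output()
 */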
1056
1057 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
1058 {
1059 struct rt6_info *rt = (struct rt6_info *) dst;
1060
1061 if (rt) {
1062 if (rt->rt6i_flags & RTF_CACHE) {
1063 if (rt6_check_expired(rt)) {
1064 ip6_del_rt(rt);
1065 dst = NULL;
1066 }
1067 } else {
1068 dst_release(dst);
1069 dst = NULL;
1070 }
1071 }
1072 return dst;
1073 }
1074
1075 static void ip6_link_failure(struct sk_buff *skb)
1076 {
1077 struct rt6_info *rt;
1078
1079 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
1080
1081 rt = (struct rt6_info *) skb_dst(skb);
1082 if (rt) {
1083 if (rt->rt6i_flags & RTF_CACHE)
1084 rt6_update_expires(rt, 0);
1085 else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
1086 rt->rt6i_node->fn_sernum = -1;
1087 }
1088 }
1089
1090 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1091 struct sk_buff *skb, u32 mtu)
1092 {
1093 struct rt6_info *rt6 = (struct rt6_info*)dst;
1094
1095 dst_confirm(dst);
1096 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
1097 struct net *net = dev_net(dst->dev);
1098
1099 rt6->rt6i_flags |= RTF_MODIFIED;
1100 if (mtu < IPV6_MIN_MTU) {
1101 u32 features = dst_metric(dst, RTAX_FEATURES);
1102 mtu = IPV6_MIN_MTU;
1103 features |= RTAX_FEATURE_ALLFRAG;
1104 dst_metric_set(dst, RTAX_FEATURES, features);
1105 }
1106 dst_metric_set(dst, RTAX_MTU, mtu);
1107 rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
1108 }
1109 }
1110
1111 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
1112 int oif, u32 mark)
1113 {
1114 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1115 struct dst_entry *dst;
1116 struct flowi6 fl6;
1117
1118 memset(&fl6, 0, sizeof(fl6));
1119 fl6.flowi6_oif = oif;
1120 fl6.flowi6_mark = mark;
1121 fl6.flowi6_flags = 0;
1122 fl6.daddr = iph->daddr;
1123 fl6.saddr = iph->saddr;
1124 fl6.flowlabel = ip6_flowinfo(iph);
1125
1126 dst = ip6_route_output(net, NULL, &fl6);
1127 if (!dst->error)
1128 ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
1129 dst_release(dst);
1130 }
1131 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
1132
1133 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
1134 {
1135 ip6_update_pmtu(skb, sock_net(sk), mtu,
1136 sk->sk_bound_dev_if, sk->sk_mark);
1137 }
1138 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
1139
1140 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1141 {
1142 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
1143 struct dst_entry *dst;
1144 struct flowi6 fl6;
1145
1146 memset(&fl6, 0, sizeof(fl6));
1147 fl6.flowi6_oif = oif;
1148 fl6.flowi6_mark = mark;
1149 fl6.flowi6_flags = 0;
1150 fl6.daddr = iph->daddr;
1151 fl6.saddr = iph->saddr;
1152 fl6.flowlabel = ip6_flowinfo(iph);
1153
1154 dst = ip6_route_output(net, NULL, &fl6);
1155 if (!dst->error)
1156 rt6_do_redirect(dst, NULL, skb);
1157 dst_release(dst);
1158 }
1159 EXPORT_SYMBOL_GPL(ip6_redirect);
1160
1161 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1162 {
1163 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
1164 }
1165 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1166
1167 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1168 {
1169 struct net_device *dev = dst->dev;
1170 unsigned int mtu = dst_mtu(dst);
1171 struct net *net = dev_net(dev);
1172
1173 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1174
1175 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1176 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1177
1178 /*
1179 * The maximal non-jumbo IPv6 payload is IPV6_MAXPLEN, and the
1180 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1181 * IPV6_MAXPLEN is also valid and means: "any MSS,
1182 * rely only on PMTU discovery"
1183 */
1184 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1185 mtu = IPV6_MAXPLEN;
1186 return mtu;
1187 }
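/*
 * Worked example: with a 1500-byte link MTU the advertised MSS computed
 * above is 1500 - sizeof(struct ipv6hdr) - sizeof(struct tcphdr)
 * = 1500 - 40 - 20 = 1440 bytes, clamped from below by ip6_rt_min_advmss
 * and set to IPV6_MAXPLEN ("any MSS") for very large MTUs.
 */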
1188
1189 static unsigned int ip6_mtu(const struct dst_entry *dst)
1190 {
1191 struct inet6_dev *idev;
1192 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1193
1194 if (mtu)
1195 return mtu;
1196
1197 mtu = IPV6_MIN_MTU;
1198
1199 rcu_read_lock();
1200 idev = __in6_dev_get(dst->dev);
1201 if (idev)
1202 mtu = idev->cnf.mtu6;
1203 rcu_read_unlock();
1204
1205 return mtu;
1206 }
1207
1208 static struct dst_entry *icmp6_dst_gc_list;
1209 static DEFINE_SPINLOCK(icmp6_dst_lock);
1210
1211 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1212 struct flowi6 *fl6)
1213 {
1214 struct dst_entry *dst;
1215 struct rt6_info *rt;
1216 struct inet6_dev *idev = in6_dev_get(dev);
1217 struct net *net = dev_net(dev);
1218
1219 if (unlikely(!idev))
1220 return ERR_PTR(-ENODEV);
1221
1222 rt = ip6_dst_alloc(net, dev, 0, NULL);
1223 if (unlikely(!rt)) {
1224 in6_dev_put(idev);
1225 dst = ERR_PTR(-ENOMEM);
1226 goto out;
1227 }
1228
1229 rt->dst.flags |= DST_HOST;
1230 rt->dst.output = ip6_output;
1231 atomic_set(&rt->dst.__refcnt, 1);
1232 rt->rt6i_dst.addr = fl6->daddr;
1233 rt->rt6i_dst.plen = 128;
1234 rt->rt6i_idev = idev;
1235 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
1236
1237 spin_lock_bh(&icmp6_dst_lock);
1238 rt->dst.next = icmp6_dst_gc_list;
1239 icmp6_dst_gc_list = &rt->dst;
1240 spin_unlock_bh(&icmp6_dst_lock);
1241
1242 fib6_force_start_gc(net);
1243
1244 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1245
1246 out:
1247 return dst;
1248 }
1249
1250 int icmp6_dst_gc(void)
1251 {
1252 struct dst_entry *dst, **pprev;
1253 int more = 0;
1254
1255 spin_lock_bh(&icmp6_dst_lock);
1256 pprev = &icmp6_dst_gc_list;
1257
1258 while ((dst = *pprev) != NULL) {
1259 if (!atomic_read(&dst->__refcnt)) {
1260 *pprev = dst->next;
1261 dst_free(dst);
1262 } else {
1263 pprev = &dst->next;
1264 ++more;
1265 }
1266 }
1267
1268 spin_unlock_bh(&icmp6_dst_lock);
1269
1270 return more;
1271 }
1272
1273 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1274 void *arg)
1275 {
1276 struct dst_entry *dst, **pprev;
1277
1278 spin_lock_bh(&icmp6_dst_lock);
1279 pprev = &icmp6_dst_gc_list;
1280 while ((dst = *pprev) != NULL) {
1281 struct rt6_info *rt = (struct rt6_info *) dst;
1282 if (func(rt, arg)) {
1283 *pprev = dst->next;
1284 dst_free(dst);
1285 } else {
1286 pprev = &dst->next;
1287 }
1288 }
1289 spin_unlock_bh(&icmp6_dst_lock);
1290 }
1291
1292 static int ip6_dst_gc(struct dst_ops *ops)
1293 {
1294 unsigned long now = jiffies;
1295 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1296 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1297 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1298 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1299 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1300 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1301 int entries;
1302
1303 entries = dst_entries_get_fast(ops);
1304 if (time_after(rt_last_gc + rt_min_interval, now) &&
1305 entries <= rt_max_size)
1306 goto out;
1307
1308 net->ipv6.ip6_rt_gc_expire++;
1309 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
1310 net->ipv6.ip6_rt_last_gc = now;
1311 entries = dst_entries_get_slow(ops);
1312 if (entries < ops->gc_thresh)
1313 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1314 out:
1315 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1316 return entries > rt_max_size;
1317 }
1318
1319 int ip6_dst_hoplimit(struct dst_entry *dst)
1320 {
1321 int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
1322 if (hoplimit == 0) {
1323 struct net_device *dev = dst->dev;
1324 struct inet6_dev *idev;
1325
1326 rcu_read_lock();
1327 idev = __in6_dev_get(dev);
1328 if (idev)
1329 hoplimit = idev->cnf.hop_limit;
1330 else
1331 hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
1332 rcu_read_unlock();
1333 }
1334 return hoplimit;
1335 }
1336 EXPORT_SYMBOL(ip6_dst_hoplimit);
1337
1338 /*
1339 *
1340 */
1341
1342 int ip6_route_add(struct fib6_config *cfg)
1343 {
1344 int err;
1345 struct net *net = cfg->fc_nlinfo.nl_net;
1346 struct rt6_info *rt = NULL;
1347 struct net_device *dev = NULL;
1348 struct inet6_dev *idev = NULL;
1349 struct fib6_table *table;
1350 int addr_type;
1351
1352 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1353 return -EINVAL;
1354 #ifndef CONFIG_IPV6_SUBTREES
1355 if (cfg->fc_src_len)
1356 return -EINVAL;
1357 #endif
1358 if (cfg->fc_ifindex) {
1359 err = -ENODEV;
1360 dev = dev_get_by_index(net, cfg->fc_ifindex);
1361 if (!dev)
1362 goto out;
1363 idev = in6_dev_get(dev);
1364 if (!idev)
1365 goto out;
1366 }
1367
1368 if (cfg->fc_metric == 0)
1369 cfg->fc_metric = IP6_RT_PRIO_USER;
1370
1371 err = -ENOBUFS;
1372 if (cfg->fc_nlinfo.nlh &&
1373 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1374 table = fib6_get_table(net, cfg->fc_table);
1375 if (!table) {
1376 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1377 table = fib6_new_table(net, cfg->fc_table);
1378 }
1379 } else {
1380 table = fib6_new_table(net, cfg->fc_table);
1381 }
1382
1383 if (!table)
1384 goto out;
1385
1386 rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
1387
1388 if (!rt) {
1389 err = -ENOMEM;
1390 goto out;
1391 }
1392
1393 if (cfg->fc_flags & RTF_EXPIRES)
1394 rt6_set_expires(rt, jiffies +
1395 clock_t_to_jiffies(cfg->fc_expires));
1396 else
1397 rt6_clean_expires(rt);
1398
1399 if (cfg->fc_protocol == RTPROT_UNSPEC)
1400 cfg->fc_protocol = RTPROT_BOOT;
1401 rt->rt6i_protocol = cfg->fc_protocol;
1402
1403 addr_type = ipv6_addr_type(&cfg->fc_dst);
1404
1405 if (addr_type & IPV6_ADDR_MULTICAST)
1406 rt->dst.input = ip6_mc_input;
1407 else if (cfg->fc_flags & RTF_LOCAL)
1408 rt->dst.input = ip6_input;
1409 else
1410 rt->dst.input = ip6_forward;
1411
1412 rt->dst.output = ip6_output;
1413
1414 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1415 rt->rt6i_dst.plen = cfg->fc_dst_len;
1416 if (rt->rt6i_dst.plen == 128)
1417 rt->dst.flags |= DST_HOST;
1418
1419 if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
1420 u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1421 if (!metrics) {
1422 err = -ENOMEM;
1423 goto out;
1424 }
1425 dst_init_metrics(&rt->dst, metrics, 0);
1426 }
1427 #ifdef CONFIG_IPV6_SUBTREES
1428 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1429 rt->rt6i_src.plen = cfg->fc_src_len;
1430 #endif
1431
1432 rt->rt6i_metric = cfg->fc_metric;
1433
1434 /* We cannot add true routes via loopback here,
1435 they would result in kernel looping; promote them to reject routes
1436 */
1437 if ((cfg->fc_flags & RTF_REJECT) ||
1438 (dev && (dev->flags & IFF_LOOPBACK) &&
1439 !(addr_type & IPV6_ADDR_LOOPBACK) &&
1440 !(cfg->fc_flags & RTF_LOCAL))) {
1441 /* hold loopback dev/idev if we haven't done so. */
1442 if (dev != net->loopback_dev) {
1443 if (dev) {
1444 dev_put(dev);
1445 in6_dev_put(idev);
1446 }
1447 dev = net->loopback_dev;
1448 dev_hold(dev);
1449 idev = in6_dev_get(dev);
1450 if (!idev) {
1451 err = -ENODEV;
1452 goto out;
1453 }
1454 }
1455 rt->dst.output = ip6_pkt_discard_out;
1456 rt->dst.input = ip6_pkt_discard;
1457 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1458 switch (cfg->fc_type) {
1459 case RTN_BLACKHOLE:
1460 rt->dst.error = -EINVAL;
1461 break;
1462 case RTN_PROHIBIT:
1463 rt->dst.error = -EACCES;
1464 break;
1465 case RTN_THROW:
1466 rt->dst.error = -EAGAIN;
1467 break;
1468 default:
1469 rt->dst.error = -ENETUNREACH;
1470 break;
1471 }
1472 goto install_route;
1473 }
1474
1475 if (cfg->fc_flags & RTF_GATEWAY) {
1476 const struct in6_addr *gw_addr;
1477 int gwa_type;
1478
1479 gw_addr = &cfg->fc_gateway;
1480 rt->rt6i_gateway = *gw_addr;
1481 gwa_type = ipv6_addr_type(gw_addr);
1482
1483 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1484 struct rt6_info *grt;
1485
1486 /* IPv6 strictly inhibits using non-link-local
1487 addresses as the nexthop address.
1488 Otherwise, a router will not be able to send redirects.
1489 It is very good, but in some (rare!) circumstances
1490 (SIT, PtP, NBMA NOARP links) it is handy to allow
1491 some exceptions. --ANK
1492 */
1493 err = -EINVAL;
1494 if (!(gwa_type & IPV6_ADDR_UNICAST))
1495 goto out;
1496
1497 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1498
1499 err = -EHOSTUNREACH;
1500 if (!grt)
1501 goto out;
1502 if (dev) {
1503 if (dev != grt->dst.dev) {
1504 ip6_rt_put(grt);
1505 goto out;
1506 }
1507 } else {
1508 dev = grt->dst.dev;
1509 idev = grt->rt6i_idev;
1510 dev_hold(dev);
1511 in6_dev_hold(grt->rt6i_idev);
1512 }
1513 if (!(grt->rt6i_flags & RTF_GATEWAY))
1514 err = 0;
1515 ip6_rt_put(grt);
1516
1517 if (err)
1518 goto out;
1519 }
1520 err = -EINVAL;
1521 if (!dev || (dev->flags & IFF_LOOPBACK))
1522 goto out;
1523 }
1524
1525 err = -ENODEV;
1526 if (!dev)
1527 goto out;
1528
1529 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1530 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1531 err = -EINVAL;
1532 goto out;
1533 }
1534 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
1535 rt->rt6i_prefsrc.plen = 128;
1536 } else
1537 rt->rt6i_prefsrc.plen = 0;
1538
1539 rt->rt6i_flags = cfg->fc_flags;
1540
1541 install_route:
1542 if (cfg->fc_mx) {
1543 struct nlattr *nla;
1544 int remaining;
1545
1546 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1547 int type = nla_type(nla);
1548
1549 if (type) {
1550 if (type > RTAX_MAX) {
1551 err = -EINVAL;
1552 goto out;
1553 }
1554
1555 dst_metric_set(&rt->dst, type, nla_get_u32(nla));
1556 }
1557 }
1558 }
1559
1560 rt->dst.dev = dev;
1561 rt->rt6i_idev = idev;
1562 rt->rt6i_table = table;
1563
1564 cfg->fc_nlinfo.nl_net = dev_net(dev);
1565
1566 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1567
1568 out:
1569 if (dev)
1570 dev_put(dev);
1571 if (idev)
1572 in6_dev_put(idev);
1573 if (rt)
1574 dst_free(&rt->dst);
1575 return err;
1576 }
1577
1578 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1579 {
1580 int err;
1581 struct fib6_table *table;
1582 struct net *net = dev_net(rt->dst.dev);
1583
1584 if (rt == net->ipv6.ip6_null_entry) {
1585 err = -ENOENT;
1586 goto out;
1587 }
1588
1589 table = rt->rt6i_table;
1590 write_lock_bh(&table->tb6_lock);
1591 err = fib6_del(rt, info);
1592 write_unlock_bh(&table->tb6_lock);
1593
1594 out:
1595 ip6_rt_put(rt);
1596 return err;
1597 }
1598
1599 int ip6_del_rt(struct rt6_info *rt)
1600 {
1601 struct nl_info info = {
1602 .nl_net = dev_net(rt->dst.dev),
1603 };
1604 return __ip6_del_rt(rt, &info);
1605 }
1606
1607 static int ip6_route_del(struct fib6_config *cfg)
1608 {
1609 struct fib6_table *table;
1610 struct fib6_node *fn;
1611 struct rt6_info *rt;
1612 int err = -ESRCH;
1613
1614 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1615 if (!table)
1616 return err;
1617
1618 read_lock_bh(&table->tb6_lock);
1619
1620 fn = fib6_locate(&table->tb6_root,
1621 &cfg->fc_dst, cfg->fc_dst_len,
1622 &cfg->fc_src, cfg->fc_src_len);
1623
1624 if (fn) {
1625 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1626 if (cfg->fc_ifindex &&
1627 (!rt->dst.dev ||
1628 rt->dst.dev->ifindex != cfg->fc_ifindex))
1629 continue;
1630 if (cfg->fc_flags & RTF_GATEWAY &&
1631 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1632 continue;
1633 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1634 continue;
1635 dst_hold(&rt->dst);
1636 read_unlock_bh(&table->tb6_lock);
1637
1638 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1639 }
1640 }
1641 read_unlock_bh(&table->tb6_lock);
1642
1643 return err;
1644 }
1645
1646 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
1647 {
1648 struct net *net = dev_net(skb->dev);
1649 struct netevent_redirect netevent;
1650 struct rt6_info *rt, *nrt = NULL;
1651 struct ndisc_options ndopts;
1652 struct inet6_dev *in6_dev;
1653 struct neighbour *neigh;
1654 struct rd_msg *msg;
1655 int optlen, on_link;
1656 u8 *lladdr;
1657
1658 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1659 optlen -= sizeof(*msg);
1660
1661 if (optlen < 0) {
1662 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
1663 return;
1664 }
1665
1666 msg = (struct rd_msg *)icmp6_hdr(skb);
1667
1668 if (ipv6_addr_is_multicast(&msg->dest)) {
1669 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
1670 return;
1671 }
1672
1673 on_link = 0;
1674 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
1675 on_link = 1;
1676 } else if (ipv6_addr_type(&msg->target) !=
1677 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
1678 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
1679 return;
1680 }
1681
1682 in6_dev = __in6_dev_get(skb->dev);
1683 if (!in6_dev)
1684 return;
1685 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
1686 return;
1687
1688 /* RFC2461 8.1:
1689 * The IP source address of the Redirect MUST be the same as the current
1690 * first-hop router for the specified ICMP Destination Address.
1691 */
1692
1693 if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
1694 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
1695 return;
1696 }
1697
1698 lladdr = NULL;
1699 if (ndopts.nd_opts_tgt_lladdr) {
1700 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
1701 skb->dev);
1702 if (!lladdr) {
1703 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
1704 return;
1705 }
1706 }
1707
1708 rt = (struct rt6_info *) dst;
1709 if (rt == net->ipv6.ip6_null_entry) {
1710 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
1711 return;
1712 }
1713
1714 /* Redirect received -> path was valid.
1715 * Look, redirects are sent only in response to data packets,
1716 * so this nexthop is apparently reachable. --ANK
1717 */
1718 dst_confirm(&rt->dst);
1719
1720 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
1721 if (!neigh)
1722 return;
1723
1724 /*
1725 * We have finally decided to accept it.
1726 */
1727
1728 neigh_update(neigh, lladdr, NUD_STALE,
1729 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1730 NEIGH_UPDATE_F_OVERRIDE|
1731 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1732 NEIGH_UPDATE_F_ISROUTER))
1733 );
1734
1735 nrt = ip6_rt_copy(rt, &msg->dest);
1736 if (!nrt)
1737 goto out;
1738
1739 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1740 if (on_link)
1741 nrt->rt6i_flags &= ~RTF_GATEWAY;
1742
1743 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
1744
1745 if (ip6_ins_rt(nrt))
1746 goto out;
1747
1748 netevent.old = &rt->dst;
1749 netevent.new = &nrt->dst;
1750 netevent.daddr = &msg->dest;
1751 netevent.neigh = neigh;
1752 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1753
1754 if (rt->rt6i_flags & RTF_CACHE) {
1755 rt = (struct rt6_info *) dst_clone(&rt->dst);
1756 ip6_del_rt(rt);
1757 }
1758
1759 out:
1760 neigh_release(neigh);
1761 }
1762
1763 /*
1764 * Misc support functions
1765 */
1766
1767 static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1768 const struct in6_addr *dest)
1769 {
1770 struct net *net = dev_net(ort->dst.dev);
1771 struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
1772 ort->rt6i_table);
1773
1774 if (rt) {
1775 rt->dst.input = ort->dst.input;
1776 rt->dst.output = ort->dst.output;
1777 rt->dst.flags |= DST_HOST;
1778
1779 rt->rt6i_dst.addr = *dest;
1780 rt->rt6i_dst.plen = 128;
1781 dst_copy_metrics(&rt->dst, &ort->dst);
1782 rt->dst.error = ort->dst.error;
1783 rt->rt6i_idev = ort->rt6i_idev;
1784 if (rt->rt6i_idev)
1785 in6_dev_hold(rt->rt6i_idev);
1786 rt->dst.lastuse = jiffies;
1787
1788 rt->rt6i_gateway = ort->rt6i_gateway;
1789 rt->rt6i_flags = ort->rt6i_flags;
1790 if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
1791 (RTF_DEFAULT | RTF_ADDRCONF))
1792 rt6_set_from(rt, ort);
1793 rt->rt6i_metric = 0;
1794
1795 #ifdef CONFIG_IPV6_SUBTREES
1796 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1797 #endif
1798 memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
1799 rt->rt6i_table = ort->rt6i_table;
1800 }
1801 return rt;
1802 }
1803
1804 #ifdef CONFIG_IPV6_ROUTE_INFO
1805 static struct rt6_info *rt6_get_route_info(struct net *net,
1806 const struct in6_addr *prefix, int prefixlen,
1807 const struct in6_addr *gwaddr, int ifindex)
1808 {
1809 struct fib6_node *fn;
1810 struct rt6_info *rt = NULL;
1811 struct fib6_table *table;
1812
1813 table = fib6_get_table(net, RT6_TABLE_INFO);
1814 if (!table)
1815 return NULL;
1816
1817 read_lock_bh(&table->tb6_lock);
1818 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
1819 if (!fn)
1820 goto out;
1821
1822 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1823 if (rt->dst.dev->ifindex != ifindex)
1824 continue;
1825 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1826 continue;
1827 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1828 continue;
1829 dst_hold(&rt->dst);
1830 break;
1831 }
1832 out:
1833 read_unlock_bh(&table->tb6_lock);
1834 return rt;
1835 }
1836
1837 static struct rt6_info *rt6_add_route_info(struct net *net,
1838 const struct in6_addr *prefix, int prefixlen,
1839 const struct in6_addr *gwaddr, int ifindex,
1840 unsigned int pref)
1841 {
1842 struct fib6_config cfg = {
1843 .fc_table = RT6_TABLE_INFO,
1844 .fc_metric = IP6_RT_PRIO_USER,
1845 .fc_ifindex = ifindex,
1846 .fc_dst_len = prefixlen,
1847 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1848 RTF_UP | RTF_PREF(pref),
1849 .fc_nlinfo.portid = 0,
1850 .fc_nlinfo.nlh = NULL,
1851 .fc_nlinfo.nl_net = net,
1852 };
1853
1854 cfg.fc_dst = *prefix;
1855 cfg.fc_gateway = *gwaddr;
1856
1857 /* We should treat it as a default route if prefix length is 0. */
1858 if (!prefixlen)
1859 cfg.fc_flags |= RTF_DEFAULT;
1860
1861 ip6_route_add(&cfg);
1862
1863 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1864 }
1865 #endif
1866
1867 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
1868 {
1869 struct rt6_info *rt;
1870 struct fib6_table *table;
1871
1872 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
1873 if (!table)
1874 return NULL;
1875
1876 read_lock_bh(&table->tb6_lock);
1877 for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
1878 if (dev == rt->dst.dev &&
1879 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1880 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1881 break;
1882 }
1883 if (rt)
1884 dst_hold(&rt->dst);
1885 read_unlock_bh(&table->tb6_lock);
1886 return rt;
1887 }
1888
1889 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
1890 struct net_device *dev,
1891 unsigned int pref)
1892 {
1893 struct fib6_config cfg = {
1894 .fc_table = RT6_TABLE_DFLT,
1895 .fc_metric = IP6_RT_PRIO_USER,
1896 .fc_ifindex = dev->ifindex,
1897 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1898 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1899 .fc_nlinfo.portid = 0,
1900 .fc_nlinfo.nlh = NULL,
1901 .fc_nlinfo.nl_net = dev_net(dev),
1902 };
1903
1904 cfg.fc_gateway = *gwaddr;
1905
1906 ip6_route_add(&cfg);
1907
1908 return rt6_get_dflt_router(gwaddr, dev);
1909 }
1910
1911 void rt6_purge_dflt_routers(struct net *net)
1912 {
1913 struct rt6_info *rt;
1914 struct fib6_table *table;
1915
1916 /* NOTE: Keep consistent with rt6_get_dflt_router */
1917 table = fib6_get_table(net, RT6_TABLE_DFLT);
1918 if (!table)
1919 return;
1920
1921 restart:
1922 read_lock_bh(&table->tb6_lock);
1923 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
1924 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
1925 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
1926 dst_hold(&rt->dst);
1927 read_unlock_bh(&table->tb6_lock);
1928 ip6_del_rt(rt);
1929 goto restart;
1930 }
1931 }
1932 read_unlock_bh(&table->tb6_lock);
1933 }
1934
1935 static void rtmsg_to_fib6_config(struct net *net,
1936 struct in6_rtmsg *rtmsg,
1937 struct fib6_config *cfg)
1938 {
1939 memset(cfg, 0, sizeof(*cfg));
1940
1941 cfg->fc_table = RT6_TABLE_MAIN;
1942 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
1943 cfg->fc_metric = rtmsg->rtmsg_metric;
1944 cfg->fc_expires = rtmsg->rtmsg_info;
1945 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
1946 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1947 cfg->fc_flags = rtmsg->rtmsg_flags;
1948
1949 cfg->fc_nlinfo.nl_net = net;
1950
1951 cfg->fc_dst = rtmsg->rtmsg_dst;
1952 cfg->fc_src = rtmsg->rtmsg_src;
1953 cfg->fc_gateway = rtmsg->rtmsg_gateway;
1954 }
1955
1956 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1957 {
1958 struct fib6_config cfg;
1959 struct in6_rtmsg rtmsg;
1960 int err;
1961
1962 switch(cmd) {
1963 case SIOCADDRT: /* Add a route */
1964 case SIOCDELRT: /* Delete a route */
1965 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1966 return -EPERM;
1967 err = copy_from_user(&rtmsg, arg,
1968 sizeof(struct in6_rtmsg));
1969 if (err)
1970 return -EFAULT;
1971
1972 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
1973
1974 rtnl_lock();
1975 switch (cmd) {
1976 case SIOCADDRT:
1977 err = ip6_route_add(&cfg);
1978 break;
1979 case SIOCDELRT:
1980 err = ip6_route_del(&cfg);
1981 break;
1982 default:
1983 err = -EINVAL;
1984 }
1985 rtnl_unlock();
1986
1987 return err;
1988 }
1989
1990 return -EINVAL;
1991 }
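/*
 * A hedged userspace sketch of driving SIOCADDRT on an AF_INET6 socket
 * (error handling omitted; the prefix, gateway and interface name are
 * purely illustrative):
 *
 *	struct in6_rtmsg rtm = { 0 };
 *	int fd;
 *
 *	inet_pton(AF_INET6, "2001:db8::", &rtm.rtmsg_dst);
 *	inet_pton(AF_INET6, "fe80::1", &rtm.rtmsg_gateway);
 *	rtm.rtmsg_dst_len = 32;
 *	rtm.rtmsg_flags   = RTF_UP | RTF_GATEWAY;
 *	rtm.rtmsg_metric  = 1;
 *	rtm.rtmsg_ifindex = if_nametoindex("eth0");
 *
 *	fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *	ioctl(fd, SIOCADDRT, &rtm);	// requires CAP_NET_ADMIN, see above
 */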
1992
1993 /*
1994 * Drop the packet on the floor
1995 */
1996
1997 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
1998 {
1999 int type;
2000 struct dst_entry *dst = skb_dst(skb);
2001 switch (ipstats_mib_noroutes) {
2002 case IPSTATS_MIB_INNOROUTES:
2003 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2004 if (type == IPV6_ADDR_ANY) {
2005 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2006 IPSTATS_MIB_INADDRERRORS);
2007 break;
2008 }
2009 /* FALLTHROUGH */
2010 case IPSTATS_MIB_OUTNOROUTES:
2011 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2012 ipstats_mib_noroutes);
2013 break;
2014 }
2015 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2016 kfree_skb(skb);
2017 return 0;
2018 }
2019
2020 static int ip6_pkt_discard(struct sk_buff *skb)
2021 {
2022 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2023 }
2024
2025 static int ip6_pkt_discard_out(struct sk_buff *skb)
2026 {
2027 skb->dev = skb_dst(skb)->dev;
2028 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2029 }
2030
2031 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2032
2033 static int ip6_pkt_prohibit(struct sk_buff *skb)
2034 {
2035 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2036 }
2037
2038 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
2039 {
2040 skb->dev = skb_dst(skb)->dev;
2041 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2042 }
2043
2044 #endif
2045
2046 /*
2047 * Allocate a dst for local (unicast / anycast) address.
2048 */
2049
2050 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2051 const struct in6_addr *addr,
2052 bool anycast)
2053 {
2054 struct net *net = dev_net(idev->dev);
2055 struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL);
2056
2057 if (!rt) {
2058 net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
2059 return ERR_PTR(-ENOMEM);
2060 }
2061
2062 in6_dev_hold(idev);
2063
2064 rt->dst.flags |= DST_HOST;
2065 rt->dst.input = ip6_input;
2066 rt->dst.output = ip6_output;
2067 rt->rt6i_idev = idev;
2068
2069 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2070 if (anycast)
2071 rt->rt6i_flags |= RTF_ANYCAST;
2072 else
2073 rt->rt6i_flags |= RTF_LOCAL;
2074
2075 rt->rt6i_dst.addr = *addr;
2076 rt->rt6i_dst.plen = 128;
2077 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2078
2079 atomic_set(&rt->dst.__refcnt, 1);
2080
2081 return rt;
2082 }
2083
2084 int ip6_route_get_saddr(struct net *net,
2085 struct rt6_info *rt,
2086 const struct in6_addr *daddr,
2087 unsigned int prefs,
2088 struct in6_addr *saddr)
2089 {
2090 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
2091 int err = 0;
2092 if (rt->rt6i_prefsrc.plen)
2093 *saddr = rt->rt6i_prefsrc.addr;
2094 else
2095 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2096 daddr, prefs, saddr);
2097 return err;
2098 }
2099
2100 /* remove deleted ip from prefsrc entries */
2101 struct arg_dev_net_ip {
2102 struct net_device *dev;
2103 struct net *net;
2104 struct in6_addr *addr;
2105 };
2106
2107 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2108 {
2109 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2110 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2111 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2112
2113 if (((void *)rt->dst.dev == dev || !dev) &&
2114 rt != net->ipv6.ip6_null_entry &&
2115 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2116 /* remove prefsrc entry */
2117 rt->rt6i_prefsrc.plen = 0;
2118 }
2119 return 0;
2120 }
2121
2122 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2123 {
2124 struct net *net = dev_net(ifp->idev->dev);
2125 struct arg_dev_net_ip adni = {
2126 .dev = ifp->idev->dev,
2127 .net = net,
2128 .addr = &ifp->addr,
2129 };
2130 fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
2131 }
2132
2133 struct arg_dev_net {
2134 struct net_device *dev;
2135 struct net *net;
2136 };
2137
2138 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2139 {
2140 const struct arg_dev_net *adn = arg;
2141 const struct net_device *dev = adn->dev;
2142
2143 if ((rt->dst.dev == dev || !dev) &&
2144 rt != adn->net->ipv6.ip6_null_entry)
2145 return -1;
2146
2147 return 0;
2148 }
2149
2150 void rt6_ifdown(struct net *net, struct net_device *dev)
2151 {
2152 struct arg_dev_net adn = {
2153 .dev = dev,
2154 .net = net,
2155 };
2156
2157 fib6_clean_all(net, fib6_ifdown, 0, &adn);
2158 icmp6_clean_all(fib6_ifdown, &adn);
2159 }
2160
2161 struct rt6_mtu_change_arg {
2162 struct net_device *dev;
2163 unsigned int mtu;
2164 };
2165
2166 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2167 {
2168 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2169 struct inet6_dev *idev;
2170
2171 /* In IPv6 pmtu discovery is not optional,
2172 so the RTAX_MTU lock cannot disable it.
2173 We still use this lock to block changes
2174 caused by addrconf/ndisc.
2175 */
2176
2177 idev = __in6_dev_get(arg->dev);
2178 if (!idev)
2179 return 0;
2180
2181 /* For an administrative MTU increase there is no way to discover
2182 an IPv6 PMTU increase, so the PMTU should be updated here.
2183 Since RFC 1981 doesn't cover administrative MTU increases,
2184 updating the PMTU on an increase is a MUST (e.g. jumbo frames).
2185 */
2186 /*
2187 If the new MTU is less than the route PMTU, the new MTU will be the
2188 lowest MTU in the path; update the route PMTU to reflect the
2189 decrease. If the new MTU is greater than the route PMTU, and the
2190 old MTU was the lowest MTU in the path, update the route PMTU
2191 to reflect the increase. In this case, if another node in the path
2192 still has a lower MTU, its Packet Too Big message will trigger
2193 PMTU discovery again.
2194 */
2195 if (rt->dst.dev == arg->dev &&
2196 !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2197 (dst_mtu(&rt->dst) >= arg->mtu ||
2198 (dst_mtu(&rt->dst) < arg->mtu &&
2199 dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2200 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2201 }
2202 return 0;
2203 }
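/* Worked example (illustrative, assuming the device MTU changes as
 * described): if the admin raises the device MTU from 1500 to 9000,
 * a route whose PMTU equals the old idev->cnf.mtu6 (1500) is bumped to
 * 9000; if the MTU is instead lowered to 1280, any route on that device
 * with PMTU >= 1280 is clamped down to 1280, unless RTAX_MTU is locked.
 */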
2204
2205 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2206 {
2207 struct rt6_mtu_change_arg arg = {
2208 .dev = dev,
2209 .mtu = mtu,
2210 };
2211
2212 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
2213 }
2214
2215 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2216 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2217 [RTA_OIF] = { .type = NLA_U32 },
2218 [RTA_IIF] = { .type = NLA_U32 },
2219 [RTA_PRIORITY] = { .type = NLA_U32 },
2220 [RTA_METRICS] = { .type = NLA_NESTED },
2221 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
2222 };
2223
2224 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2225 struct fib6_config *cfg)
2226 {
2227 struct rtmsg *rtm;
2228 struct nlattr *tb[RTA_MAX+1];
2229 int err;
2230
2231 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2232 if (err < 0)
2233 goto errout;
2234
2235 err = -EINVAL;
2236 rtm = nlmsg_data(nlh);
2237 memset(cfg, 0, sizeof(*cfg));
2238
2239 cfg->fc_table = rtm->rtm_table;
2240 cfg->fc_dst_len = rtm->rtm_dst_len;
2241 cfg->fc_src_len = rtm->rtm_src_len;
2242 cfg->fc_flags = RTF_UP;
2243 cfg->fc_protocol = rtm->rtm_protocol;
2244 cfg->fc_type = rtm->rtm_type;
2245
2246 if (rtm->rtm_type == RTN_UNREACHABLE ||
2247 rtm->rtm_type == RTN_BLACKHOLE ||
2248 rtm->rtm_type == RTN_PROHIBIT ||
2249 rtm->rtm_type == RTN_THROW)
2250 cfg->fc_flags |= RTF_REJECT;
2251
2252 if (rtm->rtm_type == RTN_LOCAL)
2253 cfg->fc_flags |= RTF_LOCAL;
2254
2255 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
2256 cfg->fc_nlinfo.nlh = nlh;
2257 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2258
2259 if (tb[RTA_GATEWAY]) {
2260 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2261 cfg->fc_flags |= RTF_GATEWAY;
2262 }
2263
2264 if (tb[RTA_DST]) {
2265 int plen = (rtm->rtm_dst_len + 7) >> 3;
2266
2267 if (nla_len(tb[RTA_DST]) < plen)
2268 goto errout;
2269
2270 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2271 }
2272
2273 if (tb[RTA_SRC]) {
2274 int plen = (rtm->rtm_src_len + 7) >> 3;
2275
2276 if (nla_len(tb[RTA_SRC]) < plen)
2277 goto errout;
2278
2279 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2280 }
2281
2282 if (tb[RTA_PREFSRC])
2283 nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
2284
2285 if (tb[RTA_OIF])
2286 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2287
2288 if (tb[RTA_PRIORITY])
2289 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2290
2291 if (tb[RTA_METRICS]) {
2292 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2293 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2294 }
2295
2296 if (tb[RTA_TABLE])
2297 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2298
2299 if (tb[RTA_MULTIPATH]) {
2300 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
2301 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
2302 }
2303
2304 err = 0;
2305 errout:
2306 return err;
2307 }
2308
2309 static int ip6_route_multipath(struct fib6_config *cfg, int add)
2310 {
2311 struct fib6_config r_cfg;
2312 struct rtnexthop *rtnh;
2313 int remaining;
2314 int attrlen;
2315 int err = 0, last_err = 0;
2316
2317 beginning:
2318 rtnh = (struct rtnexthop *)cfg->fc_mp;
2319 remaining = cfg->fc_mp_len;
2320
2321 /* Parse a Multipath Entry */
2322 while (rtnh_ok(rtnh, remaining)) {
2323 memcpy(&r_cfg, cfg, sizeof(*cfg));
2324 if (rtnh->rtnh_ifindex)
2325 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
2326
2327 attrlen = rtnh_attrlen(rtnh);
2328 if (attrlen > 0) {
2329 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
2330
2331 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
2332 if (nla) {
2333 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
2334 r_cfg.fc_flags |= RTF_GATEWAY;
2335 }
2336 }
2337 err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
2338 if (err) {
2339 last_err = err;
2340 /* If we are trying to remove a route, do not stop the
2341 * loop when ip6_route_del() fails (because the next hop is
2342 * already gone); we should try to remove all next hops.
2343 */
2344 if (add) {
2345 /* If add fails, we should try to delete all
2346 * next hops that have been already added.
2347 */
2348 add = 0;
2349 goto beginning;
2350 }
2351 }
2352 /* Because each route is added like a single route, we remove
2353 * this flag after the first nexthop (if there is a collision,
2354 * we have already failed to add the first nexthop:
2355 * fib6_add_rt2node() has rejected it).
2356 */
2357 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
2358 rtnh = rtnh_next(rtnh, &remaining);
2359 }
2360
2361 return last_err;
2362 }
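/* Layout note (illustrative, not from this file): the RTA_MULTIPATH
 * attribute is a sequence of struct rtnexthop entries, each optionally
 * followed by nested attributes (here only RTA_GATEWAY is used):
 *
 *	[ rtnexthop | RTA_GATEWAY ] [ rtnexthop | RTA_GATEWAY ] ...
 *
 * rtnh_ok()/rtnh_next() walk the sequence and nla_find() extracts the
 * per-nexthop gateway, so each nexthop is added or deleted as an
 * ordinary single route above.
 */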
2363
2364 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh)
2365 {
2366 struct fib6_config cfg;
2367 int err;
2368
2369 err = rtm_to_fib6_config(skb, nlh, &cfg);
2370 if (err < 0)
2371 return err;
2372
2373 if (cfg.fc_mp)
2374 return ip6_route_multipath(&cfg, 0);
2375 else
2376 return ip6_route_del(&cfg);
2377 }
2378
2379 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh)
2380 {
2381 struct fib6_config cfg;
2382 int err;
2383
2384 err = rtm_to_fib6_config(skb, nlh, &cfg);
2385 if (err < 0)
2386 return err;
2387
2388 if (cfg.fc_mp)
2389 return ip6_route_multipath(&cfg, 1);
2390 else
2391 return ip6_route_add(&cfg);
2392 }
2393
2394 static inline size_t rt6_nlmsg_size(void)
2395 {
2396 return NLMSG_ALIGN(sizeof(struct rtmsg))
2397 + nla_total_size(16) /* RTA_SRC */
2398 + nla_total_size(16) /* RTA_DST */
2399 + nla_total_size(16) /* RTA_GATEWAY */
2400 + nla_total_size(16) /* RTA_PREFSRC */
2401 + nla_total_size(4) /* RTA_TABLE */
2402 + nla_total_size(4) /* RTA_IIF */
2403 + nla_total_size(4) /* RTA_OIF */
2404 + nla_total_size(4) /* RTA_PRIORITY */
2405 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2406 + nla_total_size(sizeof(struct rta_cacheinfo));
2407 }
2408
2409 static int rt6_fill_node(struct net *net,
2410 struct sk_buff *skb, struct rt6_info *rt,
2411 struct in6_addr *dst, struct in6_addr *src,
2412 int iif, int type, u32 portid, u32 seq,
2413 int prefix, int nowait, unsigned int flags)
2414 {
2415 struct rtmsg *rtm;
2416 struct nlmsghdr *nlh;
2417 long expires;
2418 u32 table;
2419
2420 if (prefix) { /* user wants prefix routes only */
2421 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2422 /* success since this is not a prefix route */
2423 return 1;
2424 }
2425 }
2426
2427 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
2428 if (!nlh)
2429 return -EMSGSIZE;
2430
2431 rtm = nlmsg_data(nlh);
2432 rtm->rtm_family = AF_INET6;
2433 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2434 rtm->rtm_src_len = rt->rt6i_src.plen;
2435 rtm->rtm_tos = 0;
2436 if (rt->rt6i_table)
2437 table = rt->rt6i_table->tb6_id;
2438 else
2439 table = RT6_TABLE_UNSPEC;
2440 rtm->rtm_table = table;
2441 if (nla_put_u32(skb, RTA_TABLE, table))
2442 goto nla_put_failure;
2443 if (rt->rt6i_flags & RTF_REJECT) {
2444 switch (rt->dst.error) {
2445 case -EINVAL:
2446 rtm->rtm_type = RTN_BLACKHOLE;
2447 break;
2448 case -EACCES:
2449 rtm->rtm_type = RTN_PROHIBIT;
2450 break;
2451 case -EAGAIN:
2452 rtm->rtm_type = RTN_THROW;
2453 break;
2454 default:
2455 rtm->rtm_type = RTN_UNREACHABLE;
2456 break;
2457 }
2458 }
2459 else if (rt->rt6i_flags & RTF_LOCAL)
2460 rtm->rtm_type = RTN_LOCAL;
2461 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
2462 rtm->rtm_type = RTN_LOCAL;
2463 else
2464 rtm->rtm_type = RTN_UNICAST;
2465 rtm->rtm_flags = 0;
2466 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2467 rtm->rtm_protocol = rt->rt6i_protocol;
2468 if (rt->rt6i_flags & RTF_DYNAMIC)
2469 rtm->rtm_protocol = RTPROT_REDIRECT;
2470 else if (rt->rt6i_flags & RTF_ADDRCONF) {
2471 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
2472 rtm->rtm_protocol = RTPROT_RA;
2473 else
2474 rtm->rtm_protocol = RTPROT_KERNEL;
2475 }
2476
2477 if (rt->rt6i_flags & RTF_CACHE)
2478 rtm->rtm_flags |= RTM_F_CLONED;
2479
2480 if (dst) {
2481 if (nla_put(skb, RTA_DST, 16, dst))
2482 goto nla_put_failure;
2483 rtm->rtm_dst_len = 128;
2484 } else if (rtm->rtm_dst_len)
2485 if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
2486 goto nla_put_failure;
2487 #ifdef CONFIG_IPV6_SUBTREES
2488 if (src) {
2489 if (nla_put(skb, RTA_SRC, 16, src))
2490 goto nla_put_failure;
2491 rtm->rtm_src_len = 128;
2492 } else if (rtm->rtm_src_len &&
2493 nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
2494 goto nla_put_failure;
2495 #endif
2496 if (iif) {
2497 #ifdef CONFIG_IPV6_MROUTE
2498 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2499 int err = ip6mr_get_route(net, skb, rtm, nowait);
2500 if (err <= 0) {
2501 if (!nowait) {
2502 if (err == 0)
2503 return 0;
2504 goto nla_put_failure;
2505 } else {
2506 if (err == -EMSGSIZE)
2507 goto nla_put_failure;
2508 }
2509 }
2510 } else
2511 #endif
2512 if (nla_put_u32(skb, RTA_IIF, iif))
2513 goto nla_put_failure;
2514 } else if (dst) {
2515 struct in6_addr saddr_buf;
2516 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
2517 nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2518 goto nla_put_failure;
2519 }
2520
2521 if (rt->rt6i_prefsrc.plen) {
2522 struct in6_addr saddr_buf;
2523 saddr_buf = rt->rt6i_prefsrc.addr;
2524 if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2525 goto nla_put_failure;
2526 }
2527
2528 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2529 goto nla_put_failure;
2530
2531 if (rt->rt6i_flags & RTF_GATEWAY) {
2532 if (nla_put(skb, RTA_GATEWAY, 16, &rt->rt6i_gateway) < 0)
2533 goto nla_put_failure;
2534 }
2535
2536 if (rt->dst.dev &&
2537 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2538 goto nla_put_failure;
2539 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
2540 goto nla_put_failure;
2541
2542 expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
2543
2544 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
2545 goto nla_put_failure;
2546
2547 return nlmsg_end(skb, nlh);
2548
2549 nla_put_failure:
2550 nlmsg_cancel(skb, nlh);
2551 return -EMSGSIZE;
2552 }
2553
2554 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2555 {
2556 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2557 int prefix;
2558
2559 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2560 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2561 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2562 } else
2563 prefix = 0;
2564
2565 return rt6_fill_node(arg->net,
2566 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2567 NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
2568 prefix, 0, NLM_F_MULTI);
2569 }
2570
2571 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
2572 {
2573 struct net *net = sock_net(in_skb->sk);
2574 struct nlattr *tb[RTA_MAX+1];
2575 struct rt6_info *rt;
2576 struct sk_buff *skb;
2577 struct rtmsg *rtm;
2578 struct flowi6 fl6;
2579 int err, iif = 0, oif = 0;
2580
2581 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2582 if (err < 0)
2583 goto errout;
2584
2585 err = -EINVAL;
2586 memset(&fl6, 0, sizeof(fl6));
2587
2588 if (tb[RTA_SRC]) {
2589 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2590 goto errout;
2591
2592 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
2593 }
2594
2595 if (tb[RTA_DST]) {
2596 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2597 goto errout;
2598
2599 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
2600 }
2601
2602 if (tb[RTA_IIF])
2603 iif = nla_get_u32(tb[RTA_IIF]);
2604
2605 if (tb[RTA_OIF])
2606 oif = nla_get_u32(tb[RTA_OIF]);
2607
2608 if (iif) {
2609 struct net_device *dev;
2610 int flags = 0;
2611
2612 dev = __dev_get_by_index(net, iif);
2613 if (!dev) {
2614 err = -ENODEV;
2615 goto errout;
2616 }
2617
2618 fl6.flowi6_iif = iif;
2619
2620 if (!ipv6_addr_any(&fl6.saddr))
2621 flags |= RT6_LOOKUP_F_HAS_SADDR;
2622
2623 rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
2624 flags);
2625 } else {
2626 fl6.flowi6_oif = oif;
2627
2628 rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
2629 }
2630
2631 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2632 if (!skb) {
2633 ip6_rt_put(rt);
2634 err = -ENOBUFS;
2635 goto errout;
2636 }
2637
2638 /* Reserve room for dummy headers; this skb can pass
2639 through a good chunk of the routing engine.
2640 */
2641 skb_reset_mac_header(skb);
2642 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2643
2644 skb_dst_set(skb, &rt->dst);
2645
2646 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
2647 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
2648 nlh->nlmsg_seq, 0, 0, 0);
2649 if (err < 0) {
2650 kfree_skb(skb);
2651 goto errout;
2652 }
2653
2654 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2655 errout:
2656 return err;
2657 }
2658
2659 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2660 {
2661 struct sk_buff *skb;
2662 struct net *net = info->nl_net;
2663 u32 seq;
2664 int err;
2665
2666 err = -ENOBUFS;
2667 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2668
2669 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2670 if (!skb)
2671 goto errout;
2672
2673 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2674 event, info->portid, seq, 0, 0, 0);
2675 if (err < 0) {
2676 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2677 WARN_ON(err == -EMSGSIZE);
2678 kfree_skb(skb);
2679 goto errout;
2680 }
2681 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
2682 info->nlh, gfp_any());
2683 return;
2684 errout:
2685 if (err < 0)
2686 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2687 }
2688
2689 static int ip6_route_dev_notify(struct notifier_block *this,
2690 unsigned long event, void *ptr)
2691 {
2692 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2693 struct net *net = dev_net(dev);
2694
2695 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2696 net->ipv6.ip6_null_entry->dst.dev = dev;
2697 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2698 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2699 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
2700 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2701 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
2702 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2703 #endif
2704 }
2705
2706 return NOTIFY_OK;
2707 }
2708
2709 /*
2710 * /proc
2711 */
2712
2713 #ifdef CONFIG_PROC_FS
2714
2715 struct rt6_proc_arg
2716 {
2717 char *buffer;
2718 int offset;
2719 int length;
2720 int skip;
2721 int len;
2722 };
2723
2724 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2725 {
2726 struct seq_file *m = p_arg;
2727
2728 seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
2729
2730 #ifdef CONFIG_IPV6_SUBTREES
2731 seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
2732 #else
2733 seq_puts(m, "00000000000000000000000000000000 00 ");
2734 #endif
2735 if (rt->rt6i_flags & RTF_GATEWAY) {
2736 seq_printf(m, "%pi6", &rt->rt6i_gateway);
2737 } else {
2738 seq_puts(m, "00000000000000000000000000000000");
2739 }
2740 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2741 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
2742 rt->dst.__use, rt->rt6i_flags,
2743 rt->dst.dev ? rt->dst.dev->name : "");
2744 return 0;
2745 }
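/* Illustrative /proc/net/ipv6_route record produced by the format above
 * (hypothetical values, printed as one line but wrapped here):
 *
 *   fe800000000000000000000000000000 40 00000000000000000000000000000000 00
 *   00000000000000000000000000000000 00000100 00000001 00000000 00000001 eth0
 *
 * i.e. destination/plen, source/plen, gateway, metric, refcount, use,
 * flags and the output device, all in hex except the device name.
 */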
2746
2747 static int ipv6_route_show(struct seq_file *m, void *v)
2748 {
2749 struct net *net = (struct net *)m->private;
2750 fib6_clean_all_ro(net, rt6_info_route, 0, m);
2751 return 0;
2752 }
2753
2754 static int ipv6_route_open(struct inode *inode, struct file *file)
2755 {
2756 return single_open_net(inode, file, ipv6_route_show);
2757 }
2758
2759 static const struct file_operations ipv6_route_proc_fops = {
2760 .owner = THIS_MODULE,
2761 .open = ipv6_route_open,
2762 .read = seq_read,
2763 .llseek = seq_lseek,
2764 .release = single_release_net,
2765 };
2766
2767 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2768 {
2769 struct net *net = (struct net *)seq->private;
2770 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2771 net->ipv6.rt6_stats->fib_nodes,
2772 net->ipv6.rt6_stats->fib_route_nodes,
2773 net->ipv6.rt6_stats->fib_rt_alloc,
2774 net->ipv6.rt6_stats->fib_rt_entries,
2775 net->ipv6.rt6_stats->fib_rt_cache,
2776 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
2777 net->ipv6.rt6_stats->fib_discarded_routes);
2778
2779 return 0;
2780 }
2781
2782 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2783 {
2784 return single_open_net(inode, file, rt6_stats_seq_show);
2785 }
2786
2787 static const struct file_operations rt6_stats_seq_fops = {
2788 .owner = THIS_MODULE,
2789 .open = rt6_stats_seq_open,
2790 .read = seq_read,
2791 .llseek = seq_lseek,
2792 .release = single_release_net,
2793 };
2794 #endif /* CONFIG_PROC_FS */
2795
2796 #ifdef CONFIG_SYSCTL
2797
2798 static
2799 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
2800 void __user *buffer, size_t *lenp, loff_t *ppos)
2801 {
2802 struct net *net;
2803 int delay;
2804 if (!write)
2805 return -EINVAL;
2806
2807 net = (struct net *)ctl->extra1;
2808 delay = net->ipv6.sysctl.flush_delay;
2809 proc_dointvec(ctl, write, buffer, lenp, ppos);
2810 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2811 return 0;
2812 }
2813
2814 struct ctl_table ipv6_route_table_template[] = {
2815 {
2816 .procname = "flush",
2817 .data = &init_net.ipv6.sysctl.flush_delay,
2818 .maxlen = sizeof(int),
2819 .mode = 0200,
2820 .proc_handler = ipv6_sysctl_rtcache_flush
2821 },
2822 {
2823 .procname = "gc_thresh",
2824 .data = &ip6_dst_ops_template.gc_thresh,
2825 .maxlen = sizeof(int),
2826 .mode = 0644,
2827 .proc_handler = proc_dointvec,
2828 },
2829 {
2830 .procname = "max_size",
2831 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
2832 .maxlen = sizeof(int),
2833 .mode = 0644,
2834 .proc_handler = proc_dointvec,
2835 },
2836 {
2837 .procname = "gc_min_interval",
2838 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2839 .maxlen = sizeof(int),
2840 .mode = 0644,
2841 .proc_handler = proc_dointvec_jiffies,
2842 },
2843 {
2844 .procname = "gc_timeout",
2845 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2846 .maxlen = sizeof(int),
2847 .mode = 0644,
2848 .proc_handler = proc_dointvec_jiffies,
2849 },
2850 {
2851 .procname = "gc_interval",
2852 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
2853 .maxlen = sizeof(int),
2854 .mode = 0644,
2855 .proc_handler = proc_dointvec_jiffies,
2856 },
2857 {
2858 .procname = "gc_elasticity",
2859 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2860 .maxlen = sizeof(int),
2861 .mode = 0644,
2862 .proc_handler = proc_dointvec,
2863 },
2864 {
2865 .procname = "mtu_expires",
2866 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2867 .maxlen = sizeof(int),
2868 .mode = 0644,
2869 .proc_handler = proc_dointvec_jiffies,
2870 },
2871 {
2872 .procname = "min_adv_mss",
2873 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
2874 .maxlen = sizeof(int),
2875 .mode = 0644,
2876 .proc_handler = proc_dointvec,
2877 },
2878 {
2879 .procname = "gc_min_interval_ms",
2880 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2881 .maxlen = sizeof(int),
2882 .mode = 0644,
2883 .proc_handler = proc_dointvec_ms_jiffies,
2884 },
2885 { }
2886 };
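/* These knobs surface under /proc/sys/net/ipv6/route/ once the per-netns
 * copy created by ipv6_route_sysctl_init() below is registered, e.g.
 * (illustrative):
 *
 *	echo 8192  > /proc/sys/net/ipv6/route/max_size
 *	echo 16384 > /proc/sys/net/ipv6/route/gc_thresh
 *
 * "flush" is write-only (mode 0200) and triggers fib6_run_gc() via
 * ipv6_sysctl_rtcache_flush() above.
 */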
2887
2888 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2889 {
2890 struct ctl_table *table;
2891
2892 table = kmemdup(ipv6_route_table_template,
2893 sizeof(ipv6_route_table_template),
2894 GFP_KERNEL);
2895
2896 if (table) {
2897 table[0].data = &net->ipv6.sysctl.flush_delay;
2898 table[0].extra1 = net;
2899 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2900 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2901 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2902 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2903 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2904 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2905 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2906 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2907 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2908
2909 /* Don't export sysctls to unprivileged users */
2910 if (net->user_ns != &init_user_ns)
2911 table[0].procname = NULL;
2912 }
2913
2914 return table;
2915 }
2916 #endif
2917
2918 static int __net_init ip6_route_net_init(struct net *net)
2919 {
2920 int ret = -ENOMEM;
2921
2922 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
2923 sizeof(net->ipv6.ip6_dst_ops));
2924
2925 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
2926 goto out_ip6_dst_ops;
2927
2928 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
2929 sizeof(*net->ipv6.ip6_null_entry),
2930 GFP_KERNEL);
2931 if (!net->ipv6.ip6_null_entry)
2932 goto out_ip6_dst_entries;
2933 net->ipv6.ip6_null_entry->dst.path =
2934 (struct dst_entry *)net->ipv6.ip6_null_entry;
2935 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2936 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
2937 ip6_template_metrics, true);
2938
2939 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2940 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
2941 sizeof(*net->ipv6.ip6_prohibit_entry),
2942 GFP_KERNEL);
2943 if (!net->ipv6.ip6_prohibit_entry)
2944 goto out_ip6_null_entry;
2945 net->ipv6.ip6_prohibit_entry->dst.path =
2946 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2947 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2948 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
2949 ip6_template_metrics, true);
2950
2951 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2952 sizeof(*net->ipv6.ip6_blk_hole_entry),
2953 GFP_KERNEL);
2954 if (!net->ipv6.ip6_blk_hole_entry)
2955 goto out_ip6_prohibit_entry;
2956 net->ipv6.ip6_blk_hole_entry->dst.path =
2957 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2958 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2959 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
2960 ip6_template_metrics, true);
2961 #endif
2962
2963 net->ipv6.sysctl.flush_delay = 0;
2964 net->ipv6.sysctl.ip6_rt_max_size = 4096;
2965 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
2966 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
2967 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
2968 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
2969 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
2970 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
2971
2972 net->ipv6.ip6_rt_gc_expire = 30*HZ;
2973
2974 ret = 0;
2975 out:
2976 return ret;
2977
2978 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2979 out_ip6_prohibit_entry:
2980 kfree(net->ipv6.ip6_prohibit_entry);
2981 out_ip6_null_entry:
2982 kfree(net->ipv6.ip6_null_entry);
2983 #endif
2984 out_ip6_dst_entries:
2985 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
2986 out_ip6_dst_ops:
2987 goto out;
2988 }
2989
2990 static void __net_exit ip6_route_net_exit(struct net *net)
2991 {
2992 kfree(net->ipv6.ip6_null_entry);
2993 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2994 kfree(net->ipv6.ip6_prohibit_entry);
2995 kfree(net->ipv6.ip6_blk_hole_entry);
2996 #endif
2997 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
2998 }
2999
3000 static int __net_init ip6_route_net_init_late(struct net *net)
3001 {
3002 #ifdef CONFIG_PROC_FS
3003 proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
3004 proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
3005 #endif
3006 return 0;
3007 }
3008
3009 static void __net_exit ip6_route_net_exit_late(struct net *net)
3010 {
3011 #ifdef CONFIG_PROC_FS
3012 remove_proc_entry("ipv6_route", net->proc_net);
3013 remove_proc_entry("rt6_stats", net->proc_net);
3014 #endif
3015 }
3016
3017 static struct pernet_operations ip6_route_net_ops = {
3018 .init = ip6_route_net_init,
3019 .exit = ip6_route_net_exit,
3020 };
3021
3022 static int __net_init ipv6_inetpeer_init(struct net *net)
3023 {
3024 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3025
3026 if (!bp)
3027 return -ENOMEM;
3028 inet_peer_base_init(bp);
3029 net->ipv6.peers = bp;
3030 return 0;
3031 }
3032
3033 static void __net_exit ipv6_inetpeer_exit(struct net *net)
3034 {
3035 struct inet_peer_base *bp = net->ipv6.peers;
3036
3037 net->ipv6.peers = NULL;
3038 inetpeer_invalidate_tree(bp);
3039 kfree(bp);
3040 }
3041
3042 static struct pernet_operations ipv6_inetpeer_ops = {
3043 .init = ipv6_inetpeer_init,
3044 .exit = ipv6_inetpeer_exit,
3045 };
3046
3047 static struct pernet_operations ip6_route_net_late_ops = {
3048 .init = ip6_route_net_init_late,
3049 .exit = ip6_route_net_exit_late,
3050 };
3051
3052 static struct notifier_block ip6_route_dev_notifier = {
3053 .notifier_call = ip6_route_dev_notify,
3054 .priority = 0,
3055 };
3056
3057 int __init ip6_route_init(void)
3058 {
3059 int ret;
3060
3061 ret = -ENOMEM;
3062 ip6_dst_ops_template.kmem_cachep =
3063 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
3064 SLAB_HWCACHE_ALIGN, NULL);
3065 if (!ip6_dst_ops_template.kmem_cachep)
3066 goto out;
3067
3068 ret = dst_entries_init(&ip6_dst_blackhole_ops);
3069 if (ret)
3070 goto out_kmem_cache;
3071
3072 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
3073 if (ret)
3074 goto out_dst_entries;
3075
3076 ret = register_pernet_subsys(&ip6_route_net_ops);
3077 if (ret)
3078 goto out_register_inetpeer;
3079
3080 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
3081
3082 /* Registration of the loopback device happens before this portion of
3083 * code runs, so the loopback reference in rt6_info is not taken then;
3084 * do it manually for init_net */
3085 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
3086 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3087 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3088 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
3089 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3090 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
3091 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
3092 #endif
3093 ret = fib6_init();
3094 if (ret)
3095 goto out_register_subsys;
3096
3097 ret = xfrm6_init();
3098 if (ret)
3099 goto out_fib6_init;
3100
3101 ret = fib6_rules_init();
3102 if (ret)
3103 goto xfrm6_init;
3104
3105 ret = register_pernet_subsys(&ip6_route_net_late_ops);
3106 if (ret)
3107 goto fib6_rules_init;
3108
3109 ret = -ENOBUFS;
3110 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
3111 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
3112 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
3113 goto out_register_late_subsys;
3114
3115 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
3116 if (ret)
3117 goto out_register_late_subsys;
3118
3119 out:
3120 return ret;
3121
3122 out_register_late_subsys:
3123 unregister_pernet_subsys(&ip6_route_net_late_ops);
3124 fib6_rules_init:
3125 fib6_rules_cleanup();
3126 xfrm6_init:
3127 xfrm6_fini();
3128 out_fib6_init:
3129 fib6_gc_cleanup();
3130 out_register_subsys:
3131 unregister_pernet_subsys(&ip6_route_net_ops);
3132 out_register_inetpeer:
3133 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3134 out_dst_entries:
3135 dst_entries_destroy(&ip6_dst_blackhole_ops);
3136 out_kmem_cache:
3137 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3138 goto out;
3139 }
3140
3141 void ip6_route_cleanup(void)
3142 {
3143 unregister_netdevice_notifier(&ip6_route_dev_notifier);
3144 unregister_pernet_subsys(&ip6_route_net_late_ops);
3145 fib6_rules_cleanup();
3146 xfrm6_fini();
3147 fib6_gc_cleanup();
3148 unregister_pernet_subsys(&ipv6_inetpeer_ops);
3149 unregister_pernet_subsys(&ip6_route_net_ops);
3150 dst_entries_destroy(&ip6_dst_blackhole_ops);
3151 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3152 }