ipv6: Implement Any-IP support for IPv6.
net/ipv6/route.c
1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14 /* Changes:
15 *
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
23 * Ville Nuorvala
24 * Fixed routing subtrees.
25 */
26
27 #include <linux/capability.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/times.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/route.h>
35 #include <linux/netdevice.h>
36 #include <linux/in6.h>
37 #include <linux/mroute6.h>
38 #include <linux/init.h>
39 #include <linux/if_arp.h>
40 #include <linux/proc_fs.h>
41 #include <linux/seq_file.h>
42 #include <linux/nsproxy.h>
43 #include <linux/slab.h>
44 #include <net/net_namespace.h>
45 #include <net/snmp.h>
46 #include <net/ipv6.h>
47 #include <net/ip6_fib.h>
48 #include <net/ip6_route.h>
49 #include <net/ndisc.h>
50 #include <net/addrconf.h>
51 #include <net/tcp.h>
52 #include <linux/rtnetlink.h>
53 #include <net/dst.h>
54 #include <net/xfrm.h>
55 #include <net/netevent.h>
56 #include <net/netlink.h>
57
58 #include <asm/uaccess.h>
59
60 #ifdef CONFIG_SYSCTL
61 #include <linux/sysctl.h>
62 #endif
63
64 /* Set to 3 to get tracing. */
65 #define RT6_DEBUG 2
66
67 #if RT6_DEBUG >= 3
68 #define RDBG(x) printk x
69 #define RT6_TRACE(x...) printk(KERN_DEBUG x)
70 #else
71 #define RDBG(x)
72 #define RT6_TRACE(x...) do { ; } while (0)
73 #endif
74
75 #define CLONE_OFFLINK_ROUTE 0
76
77 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
78 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
79 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
80 static void ip6_dst_destroy(struct dst_entry *);
81 static void ip6_dst_ifdown(struct dst_entry *,
82 struct net_device *dev, int how);
83 static int ip6_dst_gc(struct dst_ops *ops);
84
85 static int ip6_pkt_discard(struct sk_buff *skb);
86 static int ip6_pkt_discard_out(struct sk_buff *skb);
87 static void ip6_link_failure(struct sk_buff *skb);
88 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
89
90 #ifdef CONFIG_IPV6_ROUTE_INFO
91 static struct rt6_info *rt6_add_route_info(struct net *net,
92 struct in6_addr *prefix, int prefixlen,
93 struct in6_addr *gwaddr, int ifindex,
94 unsigned pref);
95 static struct rt6_info *rt6_get_route_info(struct net *net,
96 struct in6_addr *prefix, int prefixlen,
97 struct in6_addr *gwaddr, int ifindex);
98 #endif
99
100 static struct dst_ops ip6_dst_ops_template = {
101 .family = AF_INET6,
102 .protocol = cpu_to_be16(ETH_P_IPV6),
103 .gc = ip6_dst_gc,
104 .gc_thresh = 1024,
105 .check = ip6_dst_check,
106 .destroy = ip6_dst_destroy,
107 .ifdown = ip6_dst_ifdown,
108 .negative_advice = ip6_negative_advice,
109 .link_failure = ip6_link_failure,
110 .update_pmtu = ip6_rt_update_pmtu,
111 .local_out = __ip6_local_out,
112 .entries = ATOMIC_INIT(0),
113 };
114
115 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
116 {
117 }
118
119 static struct dst_ops ip6_dst_blackhole_ops = {
120 .family = AF_INET6,
121 .protocol = cpu_to_be16(ETH_P_IPV6),
122 .destroy = ip6_dst_destroy,
123 .check = ip6_dst_check,
124 .update_pmtu = ip6_rt_blackhole_update_pmtu,
125 .entries = ATOMIC_INIT(0),
126 };
127
128 static struct rt6_info ip6_null_entry_template = {
129 .dst = {
130 .__refcnt = ATOMIC_INIT(1),
131 .__use = 1,
132 .obsolete = -1,
133 .error = -ENETUNREACH,
134 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
135 .input = ip6_pkt_discard,
136 .output = ip6_pkt_discard_out,
137 },
138 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
139 .rt6i_protocol = RTPROT_KERNEL,
140 .rt6i_metric = ~(u32) 0,
141 .rt6i_ref = ATOMIC_INIT(1),
142 };
143
144 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
145
146 static int ip6_pkt_prohibit(struct sk_buff *skb);
147 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
148
149 static struct rt6_info ip6_prohibit_entry_template = {
150 .dst = {
151 .__refcnt = ATOMIC_INIT(1),
152 .__use = 1,
153 .obsolete = -1,
154 .error = -EACCES,
155 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
156 .input = ip6_pkt_prohibit,
157 .output = ip6_pkt_prohibit_out,
158 },
159 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
160 .rt6i_protocol = RTPROT_KERNEL,
161 .rt6i_metric = ~(u32) 0,
162 .rt6i_ref = ATOMIC_INIT(1),
163 };
164
165 static struct rt6_info ip6_blk_hole_entry_template = {
166 .dst = {
167 .__refcnt = ATOMIC_INIT(1),
168 .__use = 1,
169 .obsolete = -1,
170 .error = -EINVAL,
171 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
172 .input = dst_discard,
173 .output = dst_discard,
174 },
175 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
176 .rt6i_protocol = RTPROT_KERNEL,
177 .rt6i_metric = ~(u32) 0,
178 .rt6i_ref = ATOMIC_INIT(1),
179 };
180
181 #endif
182
183 /* allocate dst with ip6_dst_ops */
184 static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops)
185 {
186 return (struct rt6_info *)dst_alloc(ops);
187 }
188
189 static void ip6_dst_destroy(struct dst_entry *dst)
190 {
191 struct rt6_info *rt = (struct rt6_info *)dst;
192 struct inet6_dev *idev = rt->rt6i_idev;
193
194 if (idev != NULL) {
195 rt->rt6i_idev = NULL;
196 in6_dev_put(idev);
197 }
198 }
199
200 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
201 int how)
202 {
203 struct rt6_info *rt = (struct rt6_info *)dst;
204 struct inet6_dev *idev = rt->rt6i_idev;
205 struct net_device *loopback_dev =
206 dev_net(dev)->loopback_dev;
207
208 if (dev != loopback_dev && idev != NULL && idev->dev == dev) {
209 struct inet6_dev *loopback_idev =
210 in6_dev_get(loopback_dev);
211 if (loopback_idev != NULL) {
212 rt->rt6i_idev = loopback_idev;
213 in6_dev_put(idev);
214 }
215 }
216 }
217
218 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
219 {
220 return (rt->rt6i_flags & RTF_EXPIRES) &&
221 time_after(jiffies, rt->rt6i_expires);
222 }
223
224 static inline int rt6_need_strict(struct in6_addr *daddr)
225 {
226 return ipv6_addr_type(daddr) &
227 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
228 }
229
230 /*
231 * Route lookup. The caller is assumed to hold the relevant table->tb6_lock.
232 */
233
234 static inline struct rt6_info *rt6_device_match(struct net *net,
235 struct rt6_info *rt,
236 struct in6_addr *saddr,
237 int oif,
238 int flags)
239 {
240 struct rt6_info *local = NULL;
241 struct rt6_info *sprt;
242
243 if (!oif && ipv6_addr_any(saddr))
244 goto out;
245
246 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
247 struct net_device *dev = sprt->rt6i_dev;
248
249 if (oif) {
250 if (dev->ifindex == oif)
251 return sprt;
252 if (dev->flags & IFF_LOOPBACK) {
253 if (sprt->rt6i_idev == NULL ||
254 sprt->rt6i_idev->dev->ifindex != oif) {
255 if (flags & RT6_LOOKUP_F_IFACE && oif)
256 continue;
257 if (local && (!oif ||
258 local->rt6i_idev->dev->ifindex == oif))
259 continue;
260 }
261 local = sprt;
262 }
263 } else {
264 if (ipv6_chk_addr(net, saddr, dev,
265 flags & RT6_LOOKUP_F_IFACE))
266 return sprt;
267 }
268 }
269
270 if (oif) {
271 if (local)
272 return local;
273
274 if (flags & RT6_LOOKUP_F_IFACE)
275 return net->ipv6.ip6_null_entry;
276 }
277 out:
278 return rt;
279 }
280
281 #ifdef CONFIG_IPV6_ROUTER_PREF
282 static void rt6_probe(struct rt6_info *rt)
283 {
284 struct neighbour *neigh = rt ? rt->rt6i_nexthop : NULL;
285 /*
286 * Okay, this does not seem to be appropriate
287 * for now, however, we need to check if it
288 * is really so; aka Router Reachability Probing.
289 *
290 * Router Reachability Probe MUST be rate-limited
291 * to no more than one per minute.
292 */
293 if (!neigh || (neigh->nud_state & NUD_VALID))
294 return;
295 read_lock_bh(&neigh->lock);
296 if (!(neigh->nud_state & NUD_VALID) &&
297 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
298 struct in6_addr mcaddr;
299 struct in6_addr *target;
300
301 neigh->updated = jiffies;
302 read_unlock_bh(&neigh->lock);
303
304 target = (struct in6_addr *)&neigh->primary_key;
305 addrconf_addr_solict_mult(target, &mcaddr);
306 ndisc_send_ns(rt->rt6i_dev, NULL, target, &mcaddr, NULL);
307 } else
308 read_unlock_bh(&neigh->lock);
309 }
310 #else
311 static inline void rt6_probe(struct rt6_info *rt)
312 {
313 }
314 #endif
315
316 /*
317 * Default Router Selection (RFC 2461 6.3.6)
318 */
319 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
320 {
321 struct net_device *dev = rt->rt6i_dev;
322 if (!oif || dev->ifindex == oif)
323 return 2;
324 if ((dev->flags & IFF_LOOPBACK) &&
325 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
326 return 1;
327 return 0;
328 }
329
330 static inline int rt6_check_neigh(struct rt6_info *rt)
331 {
332 struct neighbour *neigh = rt->rt6i_nexthop;
333 int m;
334 if (rt->rt6i_flags & RTF_NONEXTHOP ||
335 !(rt->rt6i_flags & RTF_GATEWAY))
336 m = 1;
337 else if (neigh) {
338 read_lock_bh(&neigh->lock);
339 if (neigh->nud_state & NUD_VALID)
340 m = 2;
341 #ifdef CONFIG_IPV6_ROUTER_PREF
342 else if (neigh->nud_state & NUD_FAILED)
343 m = 0;
344 #endif
345 else
346 m = 1;
347 read_unlock_bh(&neigh->lock);
348 } else
349 m = 0;
350 return m;
351 }
352
353 static int rt6_score_route(struct rt6_info *rt, int oif,
354 int strict)
355 {
356 int m, n;
357
358 m = rt6_check_dev(rt, oif);
359 if (!m && (strict & RT6_LOOKUP_F_IFACE))
360 return -1;
361 #ifdef CONFIG_IPV6_ROUTER_PREF
362 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
363 #endif
364 n = rt6_check_neigh(rt);
365 if (!n && (strict & RT6_LOOKUP_F_REACHABLE))
366 return -1;
367 return m;
368 }
369
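/*
 * Illustrative sketch, not part of this file: how rt6_score_route()
 * above packs its result.  The interface match (0 = no match, 1 =
 * loopback route whose idev matches the requested oif, 2 = exact
 * device match or no oif requested) occupies the two low bits and,
 * with CONFIG_IPV6_ROUTER_PREF, the decoded RFC 4191 router preference
 * is OR'ed in above them, so a more-preferred router outranks a better
 * interface match unless the strict flags reject the mismatch outright.
 * Standalone standard C with made-up values.
 */
#include <stdio.h>

static int example_score(int dev_match, int decoded_pref)
{
	return dev_match | (decoded_pref << 2);	/* mirrors m |= pref << 2 */
}

int main(void)
{
	/* exact oif match, low preference vs. loopback match, higher pref */
	printf("%d vs %d\n", example_score(2, 0), example_score(1, 2));
	return 0;
}
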
370 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
371 int *mpri, struct rt6_info *match)
372 {
373 int m;
374
375 if (rt6_check_expired(rt))
376 goto out;
377
378 m = rt6_score_route(rt, oif, strict);
379 if (m < 0)
380 goto out;
381
382 if (m > *mpri) {
383 if (strict & RT6_LOOKUP_F_REACHABLE)
384 rt6_probe(match);
385 *mpri = m;
386 match = rt;
387 } else if (strict & RT6_LOOKUP_F_REACHABLE) {
388 rt6_probe(rt);
389 }
390
391 out:
392 return match;
393 }
394
395 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
396 struct rt6_info *rr_head,
397 u32 metric, int oif, int strict)
398 {
399 struct rt6_info *rt, *match;
400 int mpri = -1;
401
402 match = NULL;
403 for (rt = rr_head; rt && rt->rt6i_metric == metric;
404 rt = rt->dst.rt6_next)
405 match = find_match(rt, oif, strict, &mpri, match);
406 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
407 rt = rt->dst.rt6_next)
408 match = find_match(rt, oif, strict, &mpri, match);
409
410 return match;
411 }
412
413 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
414 {
415 struct rt6_info *match, *rt0;
416 struct net *net;
417
418 RT6_TRACE("%s(fn->leaf=%p, oif=%d)\n",
419 __func__, fn->leaf, oif);
420
421 rt0 = fn->rr_ptr;
422 if (!rt0)
423 fn->rr_ptr = rt0 = fn->leaf;
424
425 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
426
427 if (!match &&
428 (strict & RT6_LOOKUP_F_REACHABLE)) {
429 struct rt6_info *next = rt0->dst.rt6_next;
430
431 /* no entries matched; do round-robin */
432 if (!next || next->rt6i_metric != rt0->rt6i_metric)
433 next = fn->leaf;
434
435 if (next != rt0)
436 fn->rr_ptr = next;
437 }
438
439 RT6_TRACE("%s() => %p\n",
440 __func__, match);
441
442 net = dev_net(rt0->rt6i_dev);
443 return match ? match : net->ipv6.ip6_null_entry;
444 }
445
446 #ifdef CONFIG_IPV6_ROUTE_INFO
447 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
448 struct in6_addr *gwaddr)
449 {
450 struct net *net = dev_net(dev);
451 struct route_info *rinfo = (struct route_info *) opt;
452 struct in6_addr prefix_buf, *prefix;
453 unsigned int pref;
454 unsigned long lifetime;
455 struct rt6_info *rt;
456
457 if (len < sizeof(struct route_info)) {
458 return -EINVAL;
459 }
460
461 /* Sanity check for prefix_len and length */
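/*
 * Per RFC 4191, the Route Information Option's Length field is in
 * units of 8 octets: 1 means no prefix octets follow, 2 carries the
 * first 8 bytes of the prefix, and 3 carries all 16.  The checks
 * below bound prefix_len against that encoding.
 */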
462 if (rinfo->length > 3) {
463 return -EINVAL;
464 } else if (rinfo->prefix_len > 128) {
465 return -EINVAL;
466 } else if (rinfo->prefix_len > 64) {
467 if (rinfo->length < 2) {
468 return -EINVAL;
469 }
470 } else if (rinfo->prefix_len > 0) {
471 if (rinfo->length < 1) {
472 return -EINVAL;
473 }
474 }
475
476 pref = rinfo->route_pref;
477 if (pref == ICMPV6_ROUTER_PREF_INVALID)
478 return -EINVAL;
479
480 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
481
482 if (rinfo->length == 3)
483 prefix = (struct in6_addr *)rinfo->prefix;
484 else {
485 /* this function is safe */
486 ipv6_addr_prefix(&prefix_buf,
487 (struct in6_addr *)rinfo->prefix,
488 rinfo->prefix_len);
489 prefix = &prefix_buf;
490 }
491
492 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
493 dev->ifindex);
494
495 if (rt && !lifetime) {
496 ip6_del_rt(rt);
497 rt = NULL;
498 }
499
500 if (!rt && lifetime)
501 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
502 pref);
503 else if (rt)
504 rt->rt6i_flags = RTF_ROUTEINFO |
505 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
506
507 if (rt) {
508 if (!addrconf_finite_timeout(lifetime)) {
509 rt->rt6i_flags &= ~RTF_EXPIRES;
510 } else {
511 rt->rt6i_expires = jiffies + HZ * lifetime;
512 rt->rt6i_flags |= RTF_EXPIRES;
513 }
514 dst_release(&rt->dst);
515 }
516 return 0;
517 }
518 #endif
519
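/*
 * BACKTRACK() below is only usable inside a lookup function that
 * provides the variables rt and fn and the labels restart: and out:.
 * When the match came back as the null entry, it walks fn back up
 * towards the tree root, descending into a parent's source-address
 * subtree (FIB6_SUBTREE) where one exists and we did not just come
 * from it, and jumps to restart: as soon as it reaches a node that
 * carries routes (RTN_RTINFO), or to out: once it hits the tree root.
 */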
520 #define BACKTRACK(__net, saddr) \
521 do { \
522 if (rt == __net->ipv6.ip6_null_entry) { \
523 struct fib6_node *pn; \
524 while (1) { \
525 if (fn->fn_flags & RTN_TL_ROOT) \
526 goto out; \
527 pn = fn->parent; \
528 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
529 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
530 else \
531 fn = pn; \
532 if (fn->fn_flags & RTN_RTINFO) \
533 goto restart; \
534 } \
535 } \
536 } while(0)
537
538 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
539 struct fib6_table *table,
540 struct flowi *fl, int flags)
541 {
542 struct fib6_node *fn;
543 struct rt6_info *rt;
544
545 read_lock_bh(&table->tb6_lock);
546 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
547 restart:
548 rt = fn->leaf;
549 rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags);
550 BACKTRACK(net, &fl->fl6_src);
551 out:
552 dst_use(&rt->dst, jiffies);
553 read_unlock_bh(&table->tb6_lock);
554 return rt;
555
556 }
557
558 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
559 const struct in6_addr *saddr, int oif, int strict)
560 {
561 struct flowi fl = {
562 .oif = oif,
563 .nl_u = {
564 .ip6_u = {
565 .daddr = *daddr,
566 },
567 },
568 };
569 struct dst_entry *dst;
570 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
571
572 if (saddr) {
573 memcpy(&fl.fl6_src, saddr, sizeof(*saddr));
574 flags |= RT6_LOOKUP_F_HAS_SADDR;
575 }
576
577 dst = fib6_rule_lookup(net, &fl, flags, ip6_pol_route_lookup);
578 if (dst->error == 0)
579 return (struct rt6_info *) dst;
580
581 dst_release(dst);
582
583 return NULL;
584 }
585
586 EXPORT_SYMBOL(rt6_lookup);
587
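/*
 * Illustrative sketch, not part of this file: typical use of the
 * exported rt6_lookup() by other kernel code (the helper name is made
 * up).  A successful lookup returns a referenced rt6_info, which the
 * caller must drop with dst_release(); on failure NULL is returned
 * and no reference is held.
 */
static int example_resolve(struct net *net, const struct in6_addr *daddr)
{
	struct rt6_info *rt = rt6_lookup(net, daddr, NULL, 0, 0);

	if (!rt)
		return -EHOSTUNREACH;

	/* ... inspect rt->rt6i_dev, rt->rt6i_gateway, rt->rt6i_flags ... */

	dst_release(&rt->dst);
	return 0;
}
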
588 /* ip6_ins_rt is called with FREE table->tb6_lock.
589 It takes a new route entry; if the addition fails for any reason,
590 the route is freed. In any case, if the caller does not hold a
591 reference, it may be destroyed.
592 */
593
594 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
595 {
596 int err;
597 struct fib6_table *table;
598
599 table = rt->rt6i_table;
600 write_lock_bh(&table->tb6_lock);
601 err = fib6_add(&table->tb6_root, rt, info);
602 write_unlock_bh(&table->tb6_lock);
603
604 return err;
605 }
606
607 int ip6_ins_rt(struct rt6_info *rt)
608 {
609 struct nl_info info = {
610 .nl_net = dev_net(rt->rt6i_dev),
611 };
612 return __ip6_ins_rt(rt, &info);
613 }
614
615 static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr,
616 struct in6_addr *saddr)
617 {
618 struct rt6_info *rt;
619
620 /*
621 * Clone the route.
622 */
623
624 rt = ip6_rt_copy(ort);
625
626 if (rt) {
627 struct neighbour *neigh;
628 int attempts = !in_softirq();
629
630 if (!(rt->rt6i_flags&RTF_GATEWAY)) {
631 if (rt->rt6i_dst.plen != 128 &&
632 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
633 rt->rt6i_flags |= RTF_ANYCAST;
634 ipv6_addr_copy(&rt->rt6i_gateway, daddr);
635 }
636
637 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
638 rt->rt6i_dst.plen = 128;
639 rt->rt6i_flags |= RTF_CACHE;
640 rt->dst.flags |= DST_HOST;
641
642 #ifdef CONFIG_IPV6_SUBTREES
643 if (rt->rt6i_src.plen && saddr) {
644 ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
645 rt->rt6i_src.plen = 128;
646 }
647 #endif
648
649 retry:
650 neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
651 if (IS_ERR(neigh)) {
652 struct net *net = dev_net(rt->rt6i_dev);
653 int saved_rt_min_interval =
654 net->ipv6.sysctl.ip6_rt_gc_min_interval;
655 int saved_rt_elasticity =
656 net->ipv6.sysctl.ip6_rt_gc_elasticity;
657
658 if (attempts-- > 0) {
659 net->ipv6.sysctl.ip6_rt_gc_elasticity = 1;
660 net->ipv6.sysctl.ip6_rt_gc_min_interval = 0;
661
662 ip6_dst_gc(&net->ipv6.ip6_dst_ops);
663
664 net->ipv6.sysctl.ip6_rt_gc_elasticity =
665 saved_rt_elasticity;
666 net->ipv6.sysctl.ip6_rt_gc_min_interval =
667 saved_rt_min_interval;
668 goto retry;
669 }
670
671 if (net_ratelimit())
672 printk(KERN_WARNING
673 "Neighbour table overflow.\n");
674 dst_free(&rt->dst);
675 return NULL;
676 }
677 rt->rt6i_nexthop = neigh;
678
679 }
680
681 return rt;
682 }
683
684 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr)
685 {
686 struct rt6_info *rt = ip6_rt_copy(ort);
687 if (rt) {
688 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
689 rt->rt6i_dst.plen = 128;
690 rt->rt6i_flags |= RTF_CACHE;
691 rt->dst.flags |= DST_HOST;
692 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
693 }
694 return rt;
695 }
696
697 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
698 struct flowi *fl, int flags)
699 {
700 struct fib6_node *fn;
701 struct rt6_info *rt, *nrt;
702 int strict = 0;
703 int attempts = 3;
704 int err;
705 int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
706
707 strict |= flags & RT6_LOOKUP_F_IFACE;
708
709 relookup:
710 read_lock_bh(&table->tb6_lock);
711
712 restart_2:
713 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
714
715 restart:
716 rt = rt6_select(fn, oif, strict | reachable);
717
718 BACKTRACK(net, &fl->fl6_src);
719 if (rt == net->ipv6.ip6_null_entry ||
720 rt->rt6i_flags & RTF_CACHE)
721 goto out;
722
723 dst_hold(&rt->dst);
724 read_unlock_bh(&table->tb6_lock);
725
726 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
727 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
728 else {
729 #if CLONE_OFFLINK_ROUTE
730 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
731 #else
732 goto out2;
733 #endif
734 }
735
736 dst_release(&rt->dst);
737 rt = nrt ? : net->ipv6.ip6_null_entry;
738
739 dst_hold(&rt->dst);
740 if (nrt) {
741 err = ip6_ins_rt(nrt);
742 if (!err)
743 goto out2;
744 }
745
746 if (--attempts <= 0)
747 goto out2;
748
749 /*
750 * Race condition! In the gap when table->tb6_lock was
751 * released, someone could have inserted this route. Relookup.
752 */
753 dst_release(&rt->dst);
754 goto relookup;
755
756 out:
757 if (reachable) {
758 reachable = 0;
759 goto restart_2;
760 }
761 dst_hold(&rt->dst);
762 read_unlock_bh(&table->tb6_lock);
763 out2:
764 rt->dst.lastuse = jiffies;
765 rt->dst.__use++;
766
767 return rt;
768 }
769
770 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
771 struct flowi *fl, int flags)
772 {
773 return ip6_pol_route(net, table, fl->iif, fl, flags);
774 }
775
776 void ip6_route_input(struct sk_buff *skb)
777 {
778 struct ipv6hdr *iph = ipv6_hdr(skb);
779 struct net *net = dev_net(skb->dev);
780 int flags = RT6_LOOKUP_F_HAS_SADDR;
781 struct flowi fl = {
782 .iif = skb->dev->ifindex,
783 .nl_u = {
784 .ip6_u = {
785 .daddr = iph->daddr,
786 .saddr = iph->saddr,
787 .flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
788 },
789 },
790 .mark = skb->mark,
791 .proto = iph->nexthdr,
792 };
793
794 if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
795 flags |= RT6_LOOKUP_F_IFACE;
796
797 skb_dst_set(skb, fib6_rule_lookup(net, &fl, flags, ip6_pol_route_input));
798 }
799
800 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
801 struct flowi *fl, int flags)
802 {
803 return ip6_pol_route(net, table, fl->oif, fl, flags);
804 }
805
806 struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
807 struct flowi *fl)
808 {
809 int flags = 0;
810
811 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst))
812 flags |= RT6_LOOKUP_F_IFACE;
813
814 if (!ipv6_addr_any(&fl->fl6_src))
815 flags |= RT6_LOOKUP_F_HAS_SADDR;
816 else if (sk)
817 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
818
819 return fib6_rule_lookup(net, fl, flags, ip6_pol_route_output);
820 }
821
822 EXPORT_SYMBOL(ip6_route_output);
823
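/*
 * Illustrative sketch, not part of this file: unlike rt6_lookup(),
 * ip6_route_output() never returns NULL -- failed lookups come back
 * as the null/prohibit entries with dst->error set -- so a typical
 * caller (helper name made up) checks the error and releases the
 * reference itself.
 */
static int example_output_route(struct net *net, struct sock *sk,
				struct flowi *fl, struct dst_entry **dstp)
{
	struct dst_entry *dst = ip6_route_output(net, sk, fl);

	if (dst->error) {
		int err = dst->error;

		dst_release(dst);
		return err;
	}
	*dstp = dst;
	return 0;
}
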
824 int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl)
825 {
826 struct rt6_info *ort = (struct rt6_info *) *dstp;
827 struct rt6_info *rt = (struct rt6_info *)
828 dst_alloc(&ip6_dst_blackhole_ops);
829 struct dst_entry *new = NULL;
830
831 if (rt) {
832 new = &rt->dst;
833
834 atomic_set(&new->__refcnt, 1);
835 new->__use = 1;
836 new->input = dst_discard;
837 new->output = dst_discard;
838
839 memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
840 new->dev = ort->dst.dev;
841 if (new->dev)
842 dev_hold(new->dev);
843 rt->rt6i_idev = ort->rt6i_idev;
844 if (rt->rt6i_idev)
845 in6_dev_hold(rt->rt6i_idev);
846 rt->rt6i_expires = 0;
847
848 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
849 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
850 rt->rt6i_metric = 0;
851
852 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
853 #ifdef CONFIG_IPV6_SUBTREES
854 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
855 #endif
856
857 dst_free(new);
858 }
859
860 dst_release(*dstp);
861 *dstp = new;
862 return new ? 0 : -ENOMEM;
863 }
864 EXPORT_SYMBOL_GPL(ip6_dst_blackhole);
865
866 /*
867 * Destination cache support functions
868 */
869
870 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
871 {
872 struct rt6_info *rt;
873
874 rt = (struct rt6_info *) dst;
875
876 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
877 return dst;
878
879 return NULL;
880 }
881
882 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
883 {
884 struct rt6_info *rt = (struct rt6_info *) dst;
885
886 if (rt) {
887 if (rt->rt6i_flags & RTF_CACHE) {
888 if (rt6_check_expired(rt)) {
889 ip6_del_rt(rt);
890 dst = NULL;
891 }
892 } else {
893 dst_release(dst);
894 dst = NULL;
895 }
896 }
897 return dst;
898 }
899
900 static void ip6_link_failure(struct sk_buff *skb)
901 {
902 struct rt6_info *rt;
903
904 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
905
906 rt = (struct rt6_info *) skb_dst(skb);
907 if (rt) {
908 if (rt->rt6i_flags&RTF_CACHE) {
909 dst_set_expires(&rt->dst, 0);
910 rt->rt6i_flags |= RTF_EXPIRES;
911 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
912 rt->rt6i_node->fn_sernum = -1;
913 }
914 }
915
916 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
917 {
918 struct rt6_info *rt6 = (struct rt6_info*)dst;
919
920 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
921 rt6->rt6i_flags |= RTF_MODIFIED;
922 if (mtu < IPV6_MIN_MTU) {
923 mtu = IPV6_MIN_MTU;
924 dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
925 }
926 dst->metrics[RTAX_MTU-1] = mtu;
927 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
928 }
929 }
930
931 static int ipv6_get_mtu(struct net_device *dev);
932
933 static inline unsigned int ipv6_advmss(struct net *net, unsigned int mtu)
934 {
935 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
936
937 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
938 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
939
940 /*
941 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
942 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
943 * IPV6_MAXPLEN is also valid and means: "any MSS,
944 * rely only on pmtu discovery"
945 */
946 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
947 mtu = IPV6_MAXPLEN;
948 return mtu;
949 }
950
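/*
 * Illustrative sketch, standalone standard C rather than kernel code:
 * the advmss arithmetic above for a plain Ethernet path.  40 bytes of
 * IPv6 header and 20 bytes of basic TCP header are subtracted from the
 * MTU, the result is raised to ip6_rt_min_advmss if needed, and
 * anything exceeding IPV6_MAXPLEN minus the TCP header is reported as
 * IPV6_MAXPLEN ("any MSS, rely only on pmtu discovery").
 */
#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int min_advmss = 1220;		/* hypothetical sysctl value */
	unsigned int advmss = mtu - 40 - 20;	/* ipv6hdr + tcphdr */

	if (advmss < min_advmss)
		advmss = min_advmss;

	printf("mtu %u -> advertised mss %u\n", mtu, advmss);	/* 1440 */
	return 0;
}
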
951 static struct dst_entry *icmp6_dst_gc_list;
952 static DEFINE_SPINLOCK(icmp6_dst_lock);
953
954 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
955 struct neighbour *neigh,
956 const struct in6_addr *addr)
957 {
958 struct rt6_info *rt;
959 struct inet6_dev *idev = in6_dev_get(dev);
960 struct net *net = dev_net(dev);
961
962 if (unlikely(idev == NULL))
963 return NULL;
964
965 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
966 if (unlikely(rt == NULL)) {
967 in6_dev_put(idev);
968 goto out;
969 }
970
971 dev_hold(dev);
972 if (neigh)
973 neigh_hold(neigh);
974 else {
975 neigh = ndisc_get_neigh(dev, addr);
976 if (IS_ERR(neigh))
977 neigh = NULL;
978 }
979
980 rt->rt6i_dev = dev;
981 rt->rt6i_idev = idev;
982 rt->rt6i_nexthop = neigh;
983 atomic_set(&rt->dst.__refcnt, 1);
984 rt->dst.metrics[RTAX_HOPLIMIT-1] = 255;
985 rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
986 rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
987 rt->dst.output = ip6_output;
988
989 #if 0 /* there's no chance to use these for ndisc */
990 rt->dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
991 ? DST_HOST
992 : 0;
993 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
994 rt->rt6i_dst.plen = 128;
995 #endif
996
997 spin_lock_bh(&icmp6_dst_lock);
998 rt->dst.next = icmp6_dst_gc_list;
999 icmp6_dst_gc_list = &rt->dst;
1000 spin_unlock_bh(&icmp6_dst_lock);
1001
1002 fib6_force_start_gc(net);
1003
1004 out:
1005 return &rt->dst;
1006 }
1007
1008 int icmp6_dst_gc(void)
1009 {
1010 struct dst_entry *dst, *next, **pprev;
1011 int more = 0;
1012
1013 next = NULL;
1014
1015 spin_lock_bh(&icmp6_dst_lock);
1016 pprev = &icmp6_dst_gc_list;
1017
1018 while ((dst = *pprev) != NULL) {
1019 if (!atomic_read(&dst->__refcnt)) {
1020 *pprev = dst->next;
1021 dst_free(dst);
1022 } else {
1023 pprev = &dst->next;
1024 ++more;
1025 }
1026 }
1027
1028 spin_unlock_bh(&icmp6_dst_lock);
1029
1030 return more;
1031 }
1032
1033 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1034 void *arg)
1035 {
1036 struct dst_entry *dst, **pprev;
1037
1038 spin_lock_bh(&icmp6_dst_lock);
1039 pprev = &icmp6_dst_gc_list;
1040 while ((dst = *pprev) != NULL) {
1041 struct rt6_info *rt = (struct rt6_info *) dst;
1042 if (func(rt, arg)) {
1043 *pprev = dst->next;
1044 dst_free(dst);
1045 } else {
1046 pprev = &dst->next;
1047 }
1048 }
1049 spin_unlock_bh(&icmp6_dst_lock);
1050 }
1051
1052 static int ip6_dst_gc(struct dst_ops *ops)
1053 {
1054 unsigned long now = jiffies;
1055 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1056 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1057 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1058 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1059 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1060 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1061
1062 if (time_after(rt_last_gc + rt_min_interval, now) &&
1063 atomic_read(&ops->entries) <= rt_max_size)
1064 goto out;
1065
1066 net->ipv6.ip6_rt_gc_expire++;
1067 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
1068 net->ipv6.ip6_rt_last_gc = now;
1069 if (atomic_read(&ops->entries) < ops->gc_thresh)
1070 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1071 out:
1072 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1073 return atomic_read(&ops->entries) > rt_max_size;
1074 }
1075
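/*
 * Illustrative sketch, standalone standard C: the decay applied to
 * ip6_rt_gc_expire at the end of ip6_dst_gc() above.  Each call
 * subtracts expire >> elasticity, so with an elasticity of, say, 9
 * the forced-GC aggressiveness bleeds off by roughly 0.2% per call,
 * while each full garbage-collection pass bumps it up by one.
 */
#include <stdio.h>

int main(void)
{
	int expire = 4096;	/* made-up starting value */
	int elasticity = 9;

	for (int i = 0; i < 4; i++) {
		expire -= expire >> elasticity;
		printf("after call %d: expire = %d\n", i + 1, expire);
	}
	return 0;
}
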
1076 /* Clean the host part of a prefix. Not necessary in a radix tree,
1077 but it results in cleaner routing tables.
1078 
1079 Remove this only once everything else works!
1080 */
1081
1082 static int ipv6_get_mtu(struct net_device *dev)
1083 {
1084 int mtu = IPV6_MIN_MTU;
1085 struct inet6_dev *idev;
1086
1087 rcu_read_lock();
1088 idev = __in6_dev_get(dev);
1089 if (idev)
1090 mtu = idev->cnf.mtu6;
1091 rcu_read_unlock();
1092 return mtu;
1093 }
1094
1095 int ip6_dst_hoplimit(struct dst_entry *dst)
1096 {
1097 int hoplimit = dst_metric(dst, RTAX_HOPLIMIT);
1098 if (hoplimit < 0) {
1099 struct net_device *dev = dst->dev;
1100 struct inet6_dev *idev;
1101
1102 rcu_read_lock();
1103 idev = __in6_dev_get(dev);
1104 if (idev)
1105 hoplimit = idev->cnf.hop_limit;
1106 else
1107 hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
1108 rcu_read_unlock();
1109 }
1110 return hoplimit;
1111 }
1112
1113 /*
1114 *
1115 */
1116
1117 int ip6_route_add(struct fib6_config *cfg)
1118 {
1119 int err;
1120 struct net *net = cfg->fc_nlinfo.nl_net;
1121 struct rt6_info *rt = NULL;
1122 struct net_device *dev = NULL;
1123 struct inet6_dev *idev = NULL;
1124 struct fib6_table *table;
1125 int addr_type;
1126
1127 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1128 return -EINVAL;
1129 #ifndef CONFIG_IPV6_SUBTREES
1130 if (cfg->fc_src_len)
1131 return -EINVAL;
1132 #endif
1133 if (cfg->fc_ifindex) {
1134 err = -ENODEV;
1135 dev = dev_get_by_index(net, cfg->fc_ifindex);
1136 if (!dev)
1137 goto out;
1138 idev = in6_dev_get(dev);
1139 if (!idev)
1140 goto out;
1141 }
1142
1143 if (cfg->fc_metric == 0)
1144 cfg->fc_metric = IP6_RT_PRIO_USER;
1145
1146 table = fib6_new_table(net, cfg->fc_table);
1147 if (table == NULL) {
1148 err = -ENOBUFS;
1149 goto out;
1150 }
1151
1152 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1153
1154 if (rt == NULL) {
1155 err = -ENOMEM;
1156 goto out;
1157 }
1158
1159 rt->dst.obsolete = -1;
1160 rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ?
1161 jiffies + clock_t_to_jiffies(cfg->fc_expires) :
1162 0;
1163
1164 if (cfg->fc_protocol == RTPROT_UNSPEC)
1165 cfg->fc_protocol = RTPROT_BOOT;
1166 rt->rt6i_protocol = cfg->fc_protocol;
1167
1168 addr_type = ipv6_addr_type(&cfg->fc_dst);
1169
1170 if (addr_type & IPV6_ADDR_MULTICAST)
1171 rt->dst.input = ip6_mc_input;
1172 else if (cfg->fc_flags & RTF_LOCAL)
1173 rt->dst.input = ip6_input;
1174 else
1175 rt->dst.input = ip6_forward;
1176
1177 rt->dst.output = ip6_output;
1178
1179 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1180 rt->rt6i_dst.plen = cfg->fc_dst_len;
1181 if (rt->rt6i_dst.plen == 128)
1182 rt->dst.flags = DST_HOST;
1183
1184 #ifdef CONFIG_IPV6_SUBTREES
1185 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1186 rt->rt6i_src.plen = cfg->fc_src_len;
1187 #endif
1188
1189 rt->rt6i_metric = cfg->fc_metric;
1190
1191 /* We cannot add true routes via loopback here,
1192 they would result in kernel looping; promote them to reject routes
1193 */
1194 if ((cfg->fc_flags & RTF_REJECT) ||
1195 (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK)
1196 && !(cfg->fc_flags&RTF_LOCAL))) {
1197 /* hold loopback dev/idev if we haven't done so. */
1198 if (dev != net->loopback_dev) {
1199 if (dev) {
1200 dev_put(dev);
1201 in6_dev_put(idev);
1202 }
1203 dev = net->loopback_dev;
1204 dev_hold(dev);
1205 idev = in6_dev_get(dev);
1206 if (!idev) {
1207 err = -ENODEV;
1208 goto out;
1209 }
1210 }
1211 rt->dst.output = ip6_pkt_discard_out;
1212 rt->dst.input = ip6_pkt_discard;
1213 rt->dst.error = -ENETUNREACH;
1214 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1215 goto install_route;
1216 }
1217
1218 if (cfg->fc_flags & RTF_GATEWAY) {
1219 struct in6_addr *gw_addr;
1220 int gwa_type;
1221
1222 gw_addr = &cfg->fc_gateway;
1223 ipv6_addr_copy(&rt->rt6i_gateway, gw_addr);
1224 gwa_type = ipv6_addr_type(gw_addr);
1225
1226 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1227 struct rt6_info *grt;
1228
1229 /* IPv6 strictly forbids using non-link-local
1230 addresses as nexthop addresses;
1231 otherwise, a router would not be able to send redirects.
1232 That is a good rule, but in some (rare!) circumstances
1233 (SIT, PtP, NBMA NOARP links) it is handy to allow
1234 some exceptions. --ANK
1235 */
1236 err = -EINVAL;
1237 if (!(gwa_type&IPV6_ADDR_UNICAST))
1238 goto out;
1239
1240 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1241
1242 err = -EHOSTUNREACH;
1243 if (grt == NULL)
1244 goto out;
1245 if (dev) {
1246 if (dev != grt->rt6i_dev) {
1247 dst_release(&grt->dst);
1248 goto out;
1249 }
1250 } else {
1251 dev = grt->rt6i_dev;
1252 idev = grt->rt6i_idev;
1253 dev_hold(dev);
1254 in6_dev_hold(grt->rt6i_idev);
1255 }
1256 if (!(grt->rt6i_flags&RTF_GATEWAY))
1257 err = 0;
1258 dst_release(&grt->dst);
1259
1260 if (err)
1261 goto out;
1262 }
1263 err = -EINVAL;
1264 if (dev == NULL || (dev->flags&IFF_LOOPBACK))
1265 goto out;
1266 }
1267
1268 err = -ENODEV;
1269 if (dev == NULL)
1270 goto out;
1271
1272 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1273 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1274 if (IS_ERR(rt->rt6i_nexthop)) {
1275 err = PTR_ERR(rt->rt6i_nexthop);
1276 rt->rt6i_nexthop = NULL;
1277 goto out;
1278 }
1279 }
1280
1281 rt->rt6i_flags = cfg->fc_flags;
1282
1283 install_route:
1284 if (cfg->fc_mx) {
1285 struct nlattr *nla;
1286 int remaining;
1287
1288 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1289 int type = nla_type(nla);
1290
1291 if (type) {
1292 if (type > RTAX_MAX) {
1293 err = -EINVAL;
1294 goto out;
1295 }
1296
1297 rt->dst.metrics[type - 1] = nla_get_u32(nla);
1298 }
1299 }
1300 }
1301
1302 if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
1303 rt->dst.metrics[RTAX_HOPLIMIT-1] = -1;
1304 if (!dst_mtu(&rt->dst))
1305 rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
1306 if (!dst_metric(&rt->dst, RTAX_ADVMSS))
1307 rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
1308 rt->dst.dev = dev;
1309 rt->rt6i_idev = idev;
1310 rt->rt6i_table = table;
1311
1312 cfg->fc_nlinfo.nl_net = dev_net(dev);
1313
1314 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1315
1316 out:
1317 if (dev)
1318 dev_put(dev);
1319 if (idev)
1320 in6_dev_put(idev);
1321 if (rt)
1322 dst_free(&rt->dst);
1323 return err;
1324 }
1325
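/*
 * Illustrative sketch, not part of this file: the kind of fib6_config
 * that exercises the RTF_LOCAL ("AnyIP") handling in ip6_route_add()
 * above, roughly what "ip -6 route add local 2001:db8::/32 dev lo"
 * ends up requesting via rtm_to_fib6_config().  Every address inside
 * the prefix is then delivered locally by ip6_input() instead of being
 * forwarded.  The helper name and prefix are made up for the example.
 */
static int example_add_anyip_prefix(struct net *net, int loopback_ifindex)
{
	struct fib6_config cfg = {
		.fc_table	= RT6_TABLE_MAIN,
		.fc_metric	= IP6_RT_PRIO_USER,
		.fc_ifindex	= loopback_ifindex,
		.fc_dst_len	= 32,
		.fc_flags	= RTF_UP | RTF_LOCAL,
		.fc_nlinfo.nl_net = net,
	};

	cfg.fc_dst.s6_addr32[0] = htonl(0x20010db8);	/* 2001:db8::/32 */

	return ip6_route_add(&cfg);
}
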
1326 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1327 {
1328 int err;
1329 struct fib6_table *table;
1330 struct net *net = dev_net(rt->rt6i_dev);
1331
1332 if (rt == net->ipv6.ip6_null_entry)
1333 return -ENOENT;
1334
1335 table = rt->rt6i_table;
1336 write_lock_bh(&table->tb6_lock);
1337
1338 err = fib6_del(rt, info);
1339 dst_release(&rt->dst);
1340
1341 write_unlock_bh(&table->tb6_lock);
1342
1343 return err;
1344 }
1345
1346 int ip6_del_rt(struct rt6_info *rt)
1347 {
1348 struct nl_info info = {
1349 .nl_net = dev_net(rt->rt6i_dev),
1350 };
1351 return __ip6_del_rt(rt, &info);
1352 }
1353
1354 static int ip6_route_del(struct fib6_config *cfg)
1355 {
1356 struct fib6_table *table;
1357 struct fib6_node *fn;
1358 struct rt6_info *rt;
1359 int err = -ESRCH;
1360
1361 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1362 if (table == NULL)
1363 return err;
1364
1365 read_lock_bh(&table->tb6_lock);
1366
1367 fn = fib6_locate(&table->tb6_root,
1368 &cfg->fc_dst, cfg->fc_dst_len,
1369 &cfg->fc_src, cfg->fc_src_len);
1370
1371 if (fn) {
1372 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1373 if (cfg->fc_ifindex &&
1374 (rt->rt6i_dev == NULL ||
1375 rt->rt6i_dev->ifindex != cfg->fc_ifindex))
1376 continue;
1377 if (cfg->fc_flags & RTF_GATEWAY &&
1378 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1379 continue;
1380 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1381 continue;
1382 dst_hold(&rt->dst);
1383 read_unlock_bh(&table->tb6_lock);
1384
1385 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1386 }
1387 }
1388 read_unlock_bh(&table->tb6_lock);
1389
1390 return err;
1391 }
1392
1393 /*
1394 * Handle redirects
1395 */
1396 struct ip6rd_flowi {
1397 struct flowi fl;
1398 struct in6_addr gateway;
1399 };
1400
1401 static struct rt6_info *__ip6_route_redirect(struct net *net,
1402 struct fib6_table *table,
1403 struct flowi *fl,
1404 int flags)
1405 {
1406 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl;
1407 struct rt6_info *rt;
1408 struct fib6_node *fn;
1409
1410 /*
1411 * Get the "current" route for this destination and
1412 * check if the redirect has come from an appropriate router.
1413 *
1414 * RFC 2461 specifies that redirects should only be
1415 * accepted if they come from the nexthop to the target.
1416 * Due to the way the routes are chosen, this notion
1417 * is a bit fuzzy and one might need to check all possible
1418 * routes.
1419 */
1420
1421 read_lock_bh(&table->tb6_lock);
1422 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
1423 restart:
1424 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1425 /*
1426 * Current route is on-link; redirect is always invalid.
1427 *
1428 * Seems the previous statement is not true. It could
1429 * be a node which sees us as on-link (e.g. proxy ndisc),
1430 * but then the router serving it might decide that we
1431 * should learn the truth 8)8) --ANK (980726).
1432 */
1433 if (rt6_check_expired(rt))
1434 continue;
1435 if (!(rt->rt6i_flags & RTF_GATEWAY))
1436 continue;
1437 if (fl->oif != rt->rt6i_dev->ifindex)
1438 continue;
1439 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1440 continue;
1441 break;
1442 }
1443
1444 if (!rt)
1445 rt = net->ipv6.ip6_null_entry;
1446 BACKTRACK(net, &fl->fl6_src);
1447 out:
1448 dst_hold(&rt->dst);
1449
1450 read_unlock_bh(&table->tb6_lock);
1451
1452 return rt;
1453 };
1454
1455 static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1456 struct in6_addr *src,
1457 struct in6_addr *gateway,
1458 struct net_device *dev)
1459 {
1460 int flags = RT6_LOOKUP_F_HAS_SADDR;
1461 struct net *net = dev_net(dev);
1462 struct ip6rd_flowi rdfl = {
1463 .fl = {
1464 .oif = dev->ifindex,
1465 .nl_u = {
1466 .ip6_u = {
1467 .daddr = *dest,
1468 .saddr = *src,
1469 },
1470 },
1471 },
1472 };
1473
1474 ipv6_addr_copy(&rdfl.gateway, gateway);
1475
1476 if (rt6_need_strict(dest))
1477 flags |= RT6_LOOKUP_F_IFACE;
1478
1479 return (struct rt6_info *)fib6_rule_lookup(net, (struct flowi *)&rdfl,
1480 flags, __ip6_route_redirect);
1481 }
1482
1483 void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1484 struct in6_addr *saddr,
1485 struct neighbour *neigh, u8 *lladdr, int on_link)
1486 {
1487 struct rt6_info *rt, *nrt = NULL;
1488 struct netevent_redirect netevent;
1489 struct net *net = dev_net(neigh->dev);
1490
1491 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1492
1493 if (rt == net->ipv6.ip6_null_entry) {
1494 if (net_ratelimit())
1495 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1496 "for redirect target\n");
1497 goto out;
1498 }
1499
1500 /*
1501 * We have finally decided to accept it.
1502 */
1503
1504 neigh_update(neigh, lladdr, NUD_STALE,
1505 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1506 NEIGH_UPDATE_F_OVERRIDE|
1507 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1508 NEIGH_UPDATE_F_ISROUTER))
1509 );
1510
1511 /*
1512 * Redirect received -> path was valid.
1513 * Look, redirects are sent only in response to data packets,
1514 * so this nexthop apparently is reachable. --ANK
1515 */
1516 dst_confirm(&rt->dst);
1517
1518 /* Duplicate redirect: silently ignore. */
1519 if (neigh == rt->dst.neighbour)
1520 goto out;
1521
1522 nrt = ip6_rt_copy(rt);
1523 if (nrt == NULL)
1524 goto out;
1525
1526 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1527 if (on_link)
1528 nrt->rt6i_flags &= ~RTF_GATEWAY;
1529
1530 ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
1531 nrt->rt6i_dst.plen = 128;
1532 nrt->dst.flags |= DST_HOST;
1533
1534 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1535 nrt->rt6i_nexthop = neigh_clone(neigh);
1536 /* Reset pmtu, it may be better */
1537 nrt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
1538 nrt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev),
1539 dst_mtu(&nrt->dst));
1540
1541 if (ip6_ins_rt(nrt))
1542 goto out;
1543
1544 netevent.old = &rt->dst;
1545 netevent.new = &nrt->dst;
1546 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1547
1548 if (rt->rt6i_flags&RTF_CACHE) {
1549 ip6_del_rt(rt);
1550 return;
1551 }
1552
1553 out:
1554 dst_release(&rt->dst);
1555 }
1556
1557 /*
1558 * Handle ICMP "packet too big" messages
1559 * i.e. Path MTU discovery
1560 */
1561
1562 void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1563 struct net_device *dev, u32 pmtu)
1564 {
1565 struct rt6_info *rt, *nrt;
1566 struct net *net = dev_net(dev);
1567 int allfrag = 0;
1568
1569 rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
1570 if (rt == NULL)
1571 return;
1572
1573 if (pmtu >= dst_mtu(&rt->dst))
1574 goto out;
1575
1576 if (pmtu < IPV6_MIN_MTU) {
1577 /*
1578 * According to RFC 2460, the PMTU is set to the IPv6 Minimum Link
1579 * MTU (1280) and a fragment header should always be included
1580 * after a node receives a Packet Too Big message reporting a PMTU
1581 * less than the IPv6 Minimum Link MTU.
1582 */
1583 pmtu = IPV6_MIN_MTU;
1584 allfrag = 1;
1585 }
1586
1587 /* New mtu received -> path was valid.
1588 Too Big messages are sent only in response to data packets,
1589 so this nexthop apparently is reachable. --ANK
1590 */
1591 dst_confirm(&rt->dst);
1592
1593 /* Host route. If it is static, it is better not to
1594 override it but to add a new one, so that when the
1595 cache entry expires the old pmtu is restored
1596 automatically.
1597 */
1598 if (rt->rt6i_flags & RTF_CACHE) {
1599 rt->dst.metrics[RTAX_MTU-1] = pmtu;
1600 if (allfrag)
1601 rt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1602 dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1603 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1604 goto out;
1605 }
1606
1607 /* Network route.
1608 Two cases are possible:
1609 1. It is a connected route. Action: COW
1610 2. It is a gatewayed or NONEXTHOP route. Action: clone it.
1611 */
1612 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
1613 nrt = rt6_alloc_cow(rt, daddr, saddr);
1614 else
1615 nrt = rt6_alloc_clone(rt, daddr);
1616
1617 if (nrt) {
1618 nrt->dst.metrics[RTAX_MTU-1] = pmtu;
1619 if (allfrag)
1620 nrt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1621
1622 /* According to RFC 1981, probing for a PMTU increase should not
1623 * happen within 5 minutes; the recommended timer is 10 minutes.
1624 * Here the route expiration time is set to ip6_rt_mtu_expires,
1625 * which defaults to 10 minutes. After that the decreased pmtu
1626 * expires and detection of a PMTU increase happens automatically.
1627 */
1628 dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1629 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1630
1631 ip6_ins_rt(nrt);
1632 }
1633 out:
1634 dst_release(&rt->dst);
1635 }
1636
1637 /*
1638 * Misc support functions
1639 */
1640
1641 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1642 {
1643 struct net *net = dev_net(ort->rt6i_dev);
1644 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1645
1646 if (rt) {
1647 rt->dst.input = ort->dst.input;
1648 rt->dst.output = ort->dst.output;
1649
1650 memcpy(rt->dst.metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
1651 rt->dst.error = ort->dst.error;
1652 rt->dst.dev = ort->dst.dev;
1653 if (rt->dst.dev)
1654 dev_hold(rt->dst.dev);
1655 rt->rt6i_idev = ort->rt6i_idev;
1656 if (rt->rt6i_idev)
1657 in6_dev_hold(rt->rt6i_idev);
1658 rt->dst.lastuse = jiffies;
1659 rt->rt6i_expires = 0;
1660
1661 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
1662 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
1663 rt->rt6i_metric = 0;
1664
1665 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
1666 #ifdef CONFIG_IPV6_SUBTREES
1667 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1668 #endif
1669 rt->rt6i_table = ort->rt6i_table;
1670 }
1671 return rt;
1672 }
1673
1674 #ifdef CONFIG_IPV6_ROUTE_INFO
1675 static struct rt6_info *rt6_get_route_info(struct net *net,
1676 struct in6_addr *prefix, int prefixlen,
1677 struct in6_addr *gwaddr, int ifindex)
1678 {
1679 struct fib6_node *fn;
1680 struct rt6_info *rt = NULL;
1681 struct fib6_table *table;
1682
1683 table = fib6_get_table(net, RT6_TABLE_INFO);
1684 if (table == NULL)
1685 return NULL;
1686
1687 write_lock_bh(&table->tb6_lock);
1688 fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
1689 if (!fn)
1690 goto out;
1691
1692 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1693 if (rt->rt6i_dev->ifindex != ifindex)
1694 continue;
1695 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1696 continue;
1697 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1698 continue;
1699 dst_hold(&rt->dst);
1700 break;
1701 }
1702 out:
1703 write_unlock_bh(&table->tb6_lock);
1704 return rt;
1705 }
1706
1707 static struct rt6_info *rt6_add_route_info(struct net *net,
1708 struct in6_addr *prefix, int prefixlen,
1709 struct in6_addr *gwaddr, int ifindex,
1710 unsigned pref)
1711 {
1712 struct fib6_config cfg = {
1713 .fc_table = RT6_TABLE_INFO,
1714 .fc_metric = IP6_RT_PRIO_USER,
1715 .fc_ifindex = ifindex,
1716 .fc_dst_len = prefixlen,
1717 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1718 RTF_UP | RTF_PREF(pref),
1719 .fc_nlinfo.pid = 0,
1720 .fc_nlinfo.nlh = NULL,
1721 .fc_nlinfo.nl_net = net,
1722 };
1723
1724 ipv6_addr_copy(&cfg.fc_dst, prefix);
1725 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1726
1727 /* We should treat it as a default route if prefix length is 0. */
1728 if (!prefixlen)
1729 cfg.fc_flags |= RTF_DEFAULT;
1730
1731 ip6_route_add(&cfg);
1732
1733 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1734 }
1735 #endif
1736
1737 struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev)
1738 {
1739 struct rt6_info *rt;
1740 struct fib6_table *table;
1741
1742 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
1743 if (table == NULL)
1744 return NULL;
1745
1746 write_lock_bh(&table->tb6_lock);
1747 for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
1748 if (dev == rt->rt6i_dev &&
1749 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1750 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1751 break;
1752 }
1753 if (rt)
1754 dst_hold(&rt->dst);
1755 write_unlock_bh(&table->tb6_lock);
1756 return rt;
1757 }
1758
1759 struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
1760 struct net_device *dev,
1761 unsigned int pref)
1762 {
1763 struct fib6_config cfg = {
1764 .fc_table = RT6_TABLE_DFLT,
1765 .fc_metric = IP6_RT_PRIO_USER,
1766 .fc_ifindex = dev->ifindex,
1767 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1768 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1769 .fc_nlinfo.pid = 0,
1770 .fc_nlinfo.nlh = NULL,
1771 .fc_nlinfo.nl_net = dev_net(dev),
1772 };
1773
1774 ipv6_addr_copy(&cfg.fc_gateway, gwaddr);
1775
1776 ip6_route_add(&cfg);
1777
1778 return rt6_get_dflt_router(gwaddr, dev);
1779 }
1780
1781 void rt6_purge_dflt_routers(struct net *net)
1782 {
1783 struct rt6_info *rt;
1784 struct fib6_table *table;
1785
1786 /* NOTE: Keep consistent with rt6_get_dflt_router */
1787 table = fib6_get_table(net, RT6_TABLE_DFLT);
1788 if (table == NULL)
1789 return;
1790
1791 restart:
1792 read_lock_bh(&table->tb6_lock);
1793 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
1794 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
1795 dst_hold(&rt->dst);
1796 read_unlock_bh(&table->tb6_lock);
1797 ip6_del_rt(rt);
1798 goto restart;
1799 }
1800 }
1801 read_unlock_bh(&table->tb6_lock);
1802 }
1803
1804 static void rtmsg_to_fib6_config(struct net *net,
1805 struct in6_rtmsg *rtmsg,
1806 struct fib6_config *cfg)
1807 {
1808 memset(cfg, 0, sizeof(*cfg));
1809
1810 cfg->fc_table = RT6_TABLE_MAIN;
1811 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
1812 cfg->fc_metric = rtmsg->rtmsg_metric;
1813 cfg->fc_expires = rtmsg->rtmsg_info;
1814 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
1815 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1816 cfg->fc_flags = rtmsg->rtmsg_flags;
1817
1818 cfg->fc_nlinfo.nl_net = net;
1819
1820 ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst);
1821 ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src);
1822 ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway);
1823 }
1824
1825 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1826 {
1827 struct fib6_config cfg;
1828 struct in6_rtmsg rtmsg;
1829 int err;
1830
1831 switch(cmd) {
1832 case SIOCADDRT: /* Add a route */
1833 case SIOCDELRT: /* Delete a route */
1834 if (!capable(CAP_NET_ADMIN))
1835 return -EPERM;
1836 err = copy_from_user(&rtmsg, arg,
1837 sizeof(struct in6_rtmsg));
1838 if (err)
1839 return -EFAULT;
1840
1841 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
1842
1843 rtnl_lock();
1844 switch (cmd) {
1845 case SIOCADDRT:
1846 err = ip6_route_add(&cfg);
1847 break;
1848 case SIOCDELRT:
1849 err = ip6_route_del(&cfg);
1850 break;
1851 default:
1852 err = -EINVAL;
1853 }
1854 rtnl_unlock();
1855
1856 return err;
1857 }
1858
1859 return -EINVAL;
1860 }
1861
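/*
 * Illustrative sketch, userspace rather than kernel code: the legacy
 * ioctl interface served by ipv6_route_ioctl() above.  The caller
 * fills a struct in6_rtmsg and issues SIOCADDRT/SIOCDELRT on an IPv6
 * socket; rtmsg_to_fib6_config() then translates it into a
 * fib6_config.  The helper name is made up.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <linux/route.h>	/* RTF_UP */
#include <linux/ipv6_route.h>	/* struct in6_rtmsg */
#include <linux/sockios.h>	/* SIOCADDRT */

static int example_ioctl_add(int fd, const struct in6_addr *dst, int plen,
			     int ifindex)
{
	struct in6_rtmsg rt;

	memset(&rt, 0, sizeof(rt));
	rt.rtmsg_dst = *dst;
	rt.rtmsg_dst_len = plen;
	rt.rtmsg_ifindex = ifindex;
	rt.rtmsg_metric = 1;
	rt.rtmsg_flags = RTF_UP;

	return ioctl(fd, SIOCADDRT, &rt);	/* fd: AF_INET6 socket */
}
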
1862 /*
1863 * Drop the packet on the floor
1864 */
1865
1866 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
1867 {
1868 int type;
1869 struct dst_entry *dst = skb_dst(skb);
1870 switch (ipstats_mib_noroutes) {
1871 case IPSTATS_MIB_INNOROUTES:
1872 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
1873 if (type == IPV6_ADDR_ANY) {
1874 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
1875 IPSTATS_MIB_INADDRERRORS);
1876 break;
1877 }
1878 /* FALLTHROUGH */
1879 case IPSTATS_MIB_OUTNOROUTES:
1880 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
1881 ipstats_mib_noroutes);
1882 break;
1883 }
1884 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
1885 kfree_skb(skb);
1886 return 0;
1887 }
1888
1889 static int ip6_pkt_discard(struct sk_buff *skb)
1890 {
1891 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
1892 }
1893
1894 static int ip6_pkt_discard_out(struct sk_buff *skb)
1895 {
1896 skb->dev = skb_dst(skb)->dev;
1897 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
1898 }
1899
1900 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1901
1902 static int ip6_pkt_prohibit(struct sk_buff *skb)
1903 {
1904 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
1905 }
1906
1907 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
1908 {
1909 skb->dev = skb_dst(skb)->dev;
1910 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
1911 }
1912
1913 #endif
1914
1915 /*
1916 * Allocate a dst for local (unicast / anycast) address.
1917 */
1918
1919 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1920 const struct in6_addr *addr,
1921 int anycast)
1922 {
1923 struct net *net = dev_net(idev->dev);
1924 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1925 struct neighbour *neigh;
1926
1927 if (rt == NULL)
1928 return ERR_PTR(-ENOMEM);
1929
1930 dev_hold(net->loopback_dev);
1931 in6_dev_hold(idev);
1932
1933 rt->dst.flags = DST_HOST;
1934 rt->dst.input = ip6_input;
1935 rt->dst.output = ip6_output;
1936 rt->rt6i_dev = net->loopback_dev;
1937 rt->rt6i_idev = idev;
1938 rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
1939 rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
1940 rt->dst.metrics[RTAX_HOPLIMIT-1] = -1;
1941 rt->dst.obsolete = -1;
1942
1943 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
1944 if (anycast)
1945 rt->rt6i_flags |= RTF_ANYCAST;
1946 else
1947 rt->rt6i_flags |= RTF_LOCAL;
1948 neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
1949 if (IS_ERR(neigh)) {
1950 dst_free(&rt->dst);
1951
1952 /* We are casting this because that is the return
1953 * value type. But an errno encoded pointer is the
1954 * same regardless of the underlying pointer type,
1955 * and that's what we are returning. So this is OK.
1956 */
1957 return (struct rt6_info *) neigh;
1958 }
1959 rt->rt6i_nexthop = neigh;
1960
1961 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
1962 rt->rt6i_dst.plen = 128;
1963 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
1964
1965 atomic_set(&rt->dst.__refcnt, 1);
1966
1967 return rt;
1968 }
1969
1970 struct arg_dev_net {
1971 struct net_device *dev;
1972 struct net *net;
1973 };
1974
1975 static int fib6_ifdown(struct rt6_info *rt, void *arg)
1976 {
1977 struct net_device *dev = ((struct arg_dev_net *)arg)->dev;
1978 struct net *net = ((struct arg_dev_net *)arg)->net;
1979
1980 if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
1981 rt != net->ipv6.ip6_null_entry) {
1982 RT6_TRACE("deleted by ifdown %p\n", rt);
1983 return -1;
1984 }
1985 return 0;
1986 }
1987
1988 void rt6_ifdown(struct net *net, struct net_device *dev)
1989 {
1990 struct arg_dev_net adn = {
1991 .dev = dev,
1992 .net = net,
1993 };
1994
1995 fib6_clean_all(net, fib6_ifdown, 0, &adn);
1996 icmp6_clean_all(fib6_ifdown, &adn);
1997 }
1998
1999 struct rt6_mtu_change_arg
2000 {
2001 struct net_device *dev;
2002 unsigned mtu;
2003 };
2004
2005 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2006 {
2007 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2008 struct inet6_dev *idev;
2009 struct net *net = dev_net(arg->dev);
2010
2011 /* In IPv6, pmtu discovery is not optional,
2012 so the RTAX_MTU lock cannot disable it.
2013 We still use this lock to block changes
2014 caused by addrconf/ndisc.
2015 */
2016
2017 idev = __in6_dev_get(arg->dev);
2018 if (idev == NULL)
2019 return 0;
2020
2021 /* For an administrative MTU increase, there is no way to discover
2022 an IPv6 PMTU increase, so the PMTU increase must be applied here.
2023 Since RFC 1981 doesn't cover administrative MTU increases,
2024 updating the PMTU on increase is a MUST (e.g. jumbo frames).
2025 */
2026 /*
2027 If the new MTU is less than the route PMTU, the new MTU will be
2028 the lowest MTU in the path; update the route PMTU to reflect the
2029 decrease. If the new MTU is greater than the route PMTU, and the
2030 old MTU was the lowest MTU in the path, update the route PMTU to
2031 reflect the increase. In that case, if another node's MTU is now
2032 the lowest in the path, a Packet Too Big message will trigger
2033 PMTU discovery.
2034 */
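/*
 * Concrete example of the rule above: if the device MTU is raised from
 * 1500 to 9000, a route whose cached MTU still equals the old device
 * MTU (1500) is raised along with it, while a route whose PMTU was
 * clamped to 1280 by a Packet Too Big message keeps its lower value;
 * lowering the device MTU below a route's cached MTU always pulls the
 * route down.
 */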
2035 if (rt->rt6i_dev == arg->dev &&
2036 !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2037 (dst_mtu(&rt->dst) >= arg->mtu ||
2038 (dst_mtu(&rt->dst) < arg->mtu &&
2039 dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2040 rt->dst.metrics[RTAX_MTU-1] = arg->mtu;
2041 rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu);
2042 }
2043 return 0;
2044 }
2045
2046 void rt6_mtu_change(struct net_device *dev, unsigned mtu)
2047 {
2048 struct rt6_mtu_change_arg arg = {
2049 .dev = dev,
2050 .mtu = mtu,
2051 };
2052
2053 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
2054 }
2055
2056 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2057 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2058 [RTA_OIF] = { .type = NLA_U32 },
2059 [RTA_IIF] = { .type = NLA_U32 },
2060 [RTA_PRIORITY] = { .type = NLA_U32 },
2061 [RTA_METRICS] = { .type = NLA_NESTED },
2062 };
2063
2064 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2065 struct fib6_config *cfg)
2066 {
2067 struct rtmsg *rtm;
2068 struct nlattr *tb[RTA_MAX+1];
2069 int err;
2070
2071 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2072 if (err < 0)
2073 goto errout;
2074
2075 err = -EINVAL;
2076 rtm = nlmsg_data(nlh);
2077 memset(cfg, 0, sizeof(*cfg));
2078
2079 cfg->fc_table = rtm->rtm_table;
2080 cfg->fc_dst_len = rtm->rtm_dst_len;
2081 cfg->fc_src_len = rtm->rtm_src_len;
2082 cfg->fc_flags = RTF_UP;
2083 cfg->fc_protocol = rtm->rtm_protocol;
2084
2085 if (rtm->rtm_type == RTN_UNREACHABLE)
2086 cfg->fc_flags |= RTF_REJECT;
2087
2088 if (rtm->rtm_type == RTN_LOCAL)
2089 cfg->fc_flags |= RTF_LOCAL;
2090
2091 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
2092 cfg->fc_nlinfo.nlh = nlh;
2093 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2094
2095 if (tb[RTA_GATEWAY]) {
2096 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2097 cfg->fc_flags |= RTF_GATEWAY;
2098 }
2099
2100 if (tb[RTA_DST]) {
2101 int plen = (rtm->rtm_dst_len + 7) >> 3;
2102
2103 if (nla_len(tb[RTA_DST]) < plen)
2104 goto errout;
2105
2106 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2107 }
2108
2109 if (tb[RTA_SRC]) {
2110 int plen = (rtm->rtm_src_len + 7) >> 3;
2111
2112 if (nla_len(tb[RTA_SRC]) < plen)
2113 goto errout;
2114
2115 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2116 }
2117
2118 if (tb[RTA_OIF])
2119 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2120
2121 if (tb[RTA_PRIORITY])
2122 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2123
2124 if (tb[RTA_METRICS]) {
2125 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2126 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2127 }
2128
2129 if (tb[RTA_TABLE])
2130 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2131
2132 err = 0;
2133 errout:
2134 return err;
2135 }
2136
2137 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2138 {
2139 struct fib6_config cfg;
2140 int err;
2141
2142 err = rtm_to_fib6_config(skb, nlh, &cfg);
2143 if (err < 0)
2144 return err;
2145
2146 return ip6_route_del(&cfg);
2147 }
2148
2149 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2150 {
2151 struct fib6_config cfg;
2152 int err;
2153
2154 err = rtm_to_fib6_config(skb, nlh, &cfg);
2155 if (err < 0)
2156 return err;
2157
2158 return ip6_route_add(&cfg);
2159 }
2160
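/*
 * Worst-case payload size of a single route message; used by
 * inet6_rt_notify() to size the notification skb.
 */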
2161 static inline size_t rt6_nlmsg_size(void)
2162 {
2163 return NLMSG_ALIGN(sizeof(struct rtmsg))
2164 + nla_total_size(16) /* RTA_SRC */
2165 + nla_total_size(16) /* RTA_DST */
2166 + nla_total_size(16) /* RTA_GATEWAY */
2167 + nla_total_size(16) /* RTA_PREFSRC */
2168 + nla_total_size(4) /* RTA_TABLE */
2169 + nla_total_size(4) /* RTA_IIF */
2170 + nla_total_size(4) /* RTA_OIF */
2171 + nla_total_size(4) /* RTA_PRIORITY */
2172 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2173 + nla_total_size(sizeof(struct rta_cacheinfo));
2174 }
2175
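/*
 * Fill one RTM_* message describing @rt into @skb.  Returns a positive
 * length on success, 1 when the route is skipped because the caller
 * asked for prefix routes only, and -EMSGSIZE when @skb is too small.
 */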
2176 static int rt6_fill_node(struct net *net,
2177 struct sk_buff *skb, struct rt6_info *rt,
2178 struct in6_addr *dst, struct in6_addr *src,
2179 int iif, int type, u32 pid, u32 seq,
2180 int prefix, int nowait, unsigned int flags)
2181 {
2182 struct rtmsg *rtm;
2183 struct nlmsghdr *nlh;
2184 long expires;
2185 u32 table;
2186
2187 if (prefix) { /* user wants prefix routes only */
2188 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2189 /* success since this is not a prefix route */
2190 return 1;
2191 }
2192 }
2193
2194 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
2195 if (nlh == NULL)
2196 return -EMSGSIZE;
2197
2198 rtm = nlmsg_data(nlh);
2199 rtm->rtm_family = AF_INET6;
2200 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2201 rtm->rtm_src_len = rt->rt6i_src.plen;
2202 rtm->rtm_tos = 0;
2203 if (rt->rt6i_table)
2204 table = rt->rt6i_table->tb6_id;
2205 else
2206 table = RT6_TABLE_UNSPEC;
2207 rtm->rtm_table = table;
2208 NLA_PUT_U32(skb, RTA_TABLE, table);
2209 if (rt->rt6i_flags&RTF_REJECT)
2210 rtm->rtm_type = RTN_UNREACHABLE;
2211 else if (rt->rt6i_flags&RTF_LOCAL)
2212 rtm->rtm_type = RTN_LOCAL;
2213 else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
2214 rtm->rtm_type = RTN_LOCAL;
2215 else
2216 rtm->rtm_type = RTN_UNICAST;
2217 rtm->rtm_flags = 0;
2218 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2219 rtm->rtm_protocol = rt->rt6i_protocol;
2220 if (rt->rt6i_flags&RTF_DYNAMIC)
2221 rtm->rtm_protocol = RTPROT_REDIRECT;
2222 else if (rt->rt6i_flags & RTF_ADDRCONF)
2223 rtm->rtm_protocol = RTPROT_KERNEL;
2224 else if (rt->rt6i_flags&RTF_DEFAULT)
2225 rtm->rtm_protocol = RTPROT_RA;
2226
2227 if (rt->rt6i_flags&RTF_CACHE)
2228 rtm->rtm_flags |= RTM_F_CLONED;
2229
2230 if (dst) {
2231 NLA_PUT(skb, RTA_DST, 16, dst);
2232 rtm->rtm_dst_len = 128;
2233 } else if (rtm->rtm_dst_len)
2234 NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
2235 #ifdef CONFIG_IPV6_SUBTREES
2236 if (src) {
2237 NLA_PUT(skb, RTA_SRC, 16, src);
2238 rtm->rtm_src_len = 128;
2239 } else if (rtm->rtm_src_len)
2240 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
2241 #endif
2242 if (iif) {
2243 #ifdef CONFIG_IPV6_MROUTE
2244 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2245 int err = ip6mr_get_route(net, skb, rtm, nowait);
2246 if (err <= 0) {
2247 if (!nowait) {
2248 if (err == 0)
2249 return 0;
2250 goto nla_put_failure;
2251 } else {
2252 if (err == -EMSGSIZE)
2253 goto nla_put_failure;
2254 }
2255 }
2256 } else
2257 #endif
2258 NLA_PUT_U32(skb, RTA_IIF, iif);
2259 } else if (dst) {
2260 struct inet6_dev *idev = ip6_dst_idev(&rt->dst);
2261 struct in6_addr saddr_buf;
2262 if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2263 dst, 0, &saddr_buf) == 0)
2264 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2265 }
2266
2267 if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
2268 goto nla_put_failure;
2269
2270 if (rt->dst.neighbour)
2271 NLA_PUT(skb, RTA_GATEWAY, 16, &rt->dst.neighbour->primary_key);
2272
2273 if (rt->dst.dev)
2274 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
2275
2276 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
2277
2278 if (!(rt->rt6i_flags & RTF_EXPIRES))
2279 expires = 0;
2280 else if (rt->rt6i_expires - jiffies < INT_MAX)
2281 expires = rt->rt6i_expires - jiffies;
2282 else
2283 expires = INT_MAX;
2284
2285 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0,
2286 expires, rt->dst.error) < 0)
2287 goto nla_put_failure;
2288
2289 return nlmsg_end(skb, nlh);
2290
2291 nla_put_failure:
2292 nlmsg_cancel(skb, nlh);
2293 return -EMSGSIZE;
2294 }
2295
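/*
 * Callback used while dumping the routing table over rtnetlink
 * (RTM_GETROUTE with NLM_F_DUMP); honours the RTM_F_PREFIX filter so
 * callers can request prefix routes only.
 */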
2296 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2297 {
2298 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2299 int prefix;
2300
2301 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2302 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2303 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2304 } else
2305 prefix = 0;
2306
2307 return rt6_fill_node(arg->net,
2308 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2309 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
2310 prefix, 0, NLM_F_MULTI);
2311 }
2312
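/*
 * Handle a single RTM_GETROUTE lookup ("ip -6 route get"): build a flow
 * from the request attributes, resolve it through ip6_route_output()
 * and unicast the resulting route back to the requester.
 */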
2313 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2314 {
2315 struct net *net = sock_net(in_skb->sk);
2316 struct nlattr *tb[RTA_MAX+1];
2317 struct rt6_info *rt;
2318 struct sk_buff *skb;
2319 struct rtmsg *rtm;
2320 struct flowi fl;
2321 int err, iif = 0;
2322
2323 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2324 if (err < 0)
2325 goto errout;
2326
2327 err = -EINVAL;
2328 memset(&fl, 0, sizeof(fl));
2329
2330 if (tb[RTA_SRC]) {
2331 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2332 goto errout;
2333
2334 ipv6_addr_copy(&fl.fl6_src, nla_data(tb[RTA_SRC]));
2335 }
2336
2337 if (tb[RTA_DST]) {
2338 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2339 goto errout;
2340
2341 ipv6_addr_copy(&fl.fl6_dst, nla_data(tb[RTA_DST]));
2342 }
2343
2344 if (tb[RTA_IIF])
2345 iif = nla_get_u32(tb[RTA_IIF]);
2346
2347 if (tb[RTA_OIF])
2348 fl.oif = nla_get_u32(tb[RTA_OIF]);
2349
2350 if (iif) {
2351 struct net_device *dev;
2352 dev = __dev_get_by_index(net, iif);
2353 if (!dev) {
2354 err = -ENODEV;
2355 goto errout;
2356 }
2357 }
2358
2359 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2360 if (skb == NULL) {
2361 err = -ENOBUFS;
2362 goto errout;
2363 }
2364
2365 /* Reserve room for dummy headers; this skb can pass
2366 through a good chunk of the routing engine.
2367 */
2368 skb_reset_mac_header(skb);
2369 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2370
2371 rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl);
2372 skb_dst_set(skb, &rt->dst);
2373
2374 err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
2375 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
2376 nlh->nlmsg_seq, 0, 0, 0);
2377 if (err < 0) {
2378 kfree_skb(skb);
2379 goto errout;
2380 }
2381
2382 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2383 errout:
2384 return err;
2385 }
2386
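/*
 * Notify RTNLGRP_IPV6_ROUTE listeners about a route change.  The skb is
 * sized with rt6_nlmsg_size(), so -EMSGSIZE from rt6_fill_node() here
 * indicates a bug in that size estimate.
 */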
2387 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2388 {
2389 struct sk_buff *skb;
2390 struct net *net = info->nl_net;
2391 u32 seq;
2392 int err;
2393
2394 err = -ENOBUFS;
2395 seq = info->nlh != NULL ? info->nlh->nlmsg_seq : 0;
2396
2397 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2398 if (skb == NULL)
2399 goto errout;
2400
2401 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2402 event, info->pid, seq, 0, 0, 0);
2403 if (err < 0) {
2404 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2405 WARN_ON(err == -EMSGSIZE);
2406 kfree_skb(skb);
2407 goto errout;
2408 }
2409 rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE,
2410 info->nlh, gfp_any());
2411 return;
2412 errout:
2413 if (err < 0)
2414 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2415 }
2416
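/*
 * When the per-namespace loopback device registers, attach the special
 * null (and, with multiple tables, prohibit/blackhole) dst entries to it.
 */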
2417 static int ip6_route_dev_notify(struct notifier_block *this,
2418 unsigned long event, void *data)
2419 {
2420 struct net_device *dev = (struct net_device *)data;
2421 struct net *net = dev_net(dev);
2422
2423 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2424 net->ipv6.ip6_null_entry->dst.dev = dev;
2425 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2426 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2427 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
2428 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2429 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
2430 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2431 #endif
2432 }
2433
2434 return NOTIFY_OK;
2435 }
2436
2437 /*
2438 * /proc
2439 */
2440
2441 #ifdef CONFIG_PROC_FS
2442
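/*
 * RT6_INFO_LEN and struct rt6_proc_arg below appear to be leftovers
 * from the old read()-based /proc interface; the seq_file code that
 * follows does not use them.
 */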
2443 #define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
2444
2445 struct rt6_proc_arg
2446 {
2447 char *buffer;
2448 int offset;
2449 int length;
2450 int skip;
2451 int len;
2452 };
2453
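/*
 * Emit one line of /proc/net/ipv6_route: destination/plen, source/plen,
 * next hop, metric, reference count, use count, flags and device name.
 */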
2454 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2455 {
2456 struct seq_file *m = p_arg;
2457
2458 seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
2459
2460 #ifdef CONFIG_IPV6_SUBTREES
2461 seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
2462 #else
2463 seq_puts(m, "00000000000000000000000000000000 00 ");
2464 #endif
2465
2466 if (rt->rt6i_nexthop) {
2467 seq_printf(m, "%pi6", rt->rt6i_nexthop->primary_key);
2468 } else {
2469 seq_puts(m, "00000000000000000000000000000000");
2470 }
2471 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2472 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
2473 rt->dst.__use, rt->rt6i_flags,
2474 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2475 return 0;
2476 }
2477
2478 static int ipv6_route_show(struct seq_file *m, void *v)
2479 {
2480 struct net *net = (struct net *)m->private;
2481 fib6_clean_all(net, rt6_info_route, 0, m);
2482 return 0;
2483 }
2484
2485 static int ipv6_route_open(struct inode *inode, struct file *file)
2486 {
2487 return single_open_net(inode, file, ipv6_route_show);
2488 }
2489
2490 static const struct file_operations ipv6_route_proc_fops = {
2491 .owner = THIS_MODULE,
2492 .open = ipv6_route_open,
2493 .read = seq_read,
2494 .llseek = seq_lseek,
2495 .release = single_release_net,
2496 };
2497
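/*
 * /proc/net/rt6_stats: fib6 node and route counters plus the current
 * number of allocated dst entries.
 */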
2498 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2499 {
2500 struct net *net = (struct net *)seq->private;
2501 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2502 net->ipv6.rt6_stats->fib_nodes,
2503 net->ipv6.rt6_stats->fib_route_nodes,
2504 net->ipv6.rt6_stats->fib_rt_alloc,
2505 net->ipv6.rt6_stats->fib_rt_entries,
2506 net->ipv6.rt6_stats->fib_rt_cache,
2507 atomic_read(&net->ipv6.ip6_dst_ops.entries),
2508 net->ipv6.rt6_stats->fib_discarded_routes);
2509
2510 return 0;
2511 }
2512
2513 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2514 {
2515 return single_open_net(inode, file, rt6_stats_seq_show);
2516 }
2517
2518 static const struct file_operations rt6_stats_seq_fops = {
2519 .owner = THIS_MODULE,
2520 .open = rt6_stats_seq_open,
2521 .read = seq_read,
2522 .llseek = seq_lseek,
2523 .release = single_release_net,
2524 };
2525 #endif /* CONFIG_PROC_FS */
2526
2527 #ifdef CONFIG_SYSCTL
2528
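/*
 * net.ipv6.route.flush is write-only: writing a value (e.g.
 * "echo 1 > /proc/sys/net/ipv6/route/flush") triggers an immediate
 * fib6_run_gc() pass for the writer's namespace; anything but a write
 * is rejected with -EINVAL.
 */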
2529 static
2530 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
2531 void __user *buffer, size_t *lenp, loff_t *ppos)
2532 {
2533 struct net *net = current->nsproxy->net_ns;
2534 int delay = net->ipv6.sysctl.flush_delay;
2535 if (write) {
2536 proc_dointvec(ctl, write, buffer, lenp, ppos);
2537 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2538 return 0;
2539 } else
2540 return -EINVAL;
2541 }
2542
2543 ctl_table ipv6_route_table_template[] = {
2544 {
2545 .procname = "flush",
2546 .data = &init_net.ipv6.sysctl.flush_delay,
2547 .maxlen = sizeof(int),
2548 .mode = 0200,
2549 .proc_handler = ipv6_sysctl_rtcache_flush
2550 },
2551 {
2552 .procname = "gc_thresh",
2553 .data = &ip6_dst_ops_template.gc_thresh,
2554 .maxlen = sizeof(int),
2555 .mode = 0644,
2556 .proc_handler = proc_dointvec,
2557 },
2558 {
2559 .procname = "max_size",
2560 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
2561 .maxlen = sizeof(int),
2562 .mode = 0644,
2563 .proc_handler = proc_dointvec,
2564 },
2565 {
2566 .procname = "gc_min_interval",
2567 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2568 .maxlen = sizeof(int),
2569 .mode = 0644,
2570 .proc_handler = proc_dointvec_jiffies,
2571 },
2572 {
2573 .procname = "gc_timeout",
2574 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2575 .maxlen = sizeof(int),
2576 .mode = 0644,
2577 .proc_handler = proc_dointvec_jiffies,
2578 },
2579 {
2580 .procname = "gc_interval",
2581 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
2582 .maxlen = sizeof(int),
2583 .mode = 0644,
2584 .proc_handler = proc_dointvec_jiffies,
2585 },
2586 {
2587 .procname = "gc_elasticity",
2588 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2589 .maxlen = sizeof(int),
2590 .mode = 0644,
2591 .proc_handler = proc_dointvec,
2592 },
2593 {
2594 .procname = "mtu_expires",
2595 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2596 .maxlen = sizeof(int),
2597 .mode = 0644,
2598 .proc_handler = proc_dointvec_jiffies,
2599 },
2600 {
2601 .procname = "min_adv_mss",
2602 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
2603 .maxlen = sizeof(int),
2604 .mode = 0644,
2605 .proc_handler = proc_dointvec,
2606 },
2607 {
2608 .procname = "gc_min_interval_ms",
2609 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2610 .maxlen = sizeof(int),
2611 .mode = 0644,
2612 .proc_handler = proc_dointvec_ms_jiffies,
2613 },
2614 { }
2615 };
2616
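/*
 * Clone the template table for a new network namespace and re-point
 * each entry's ->data at that namespace's own variables.
 */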
2617 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2618 {
2619 struct ctl_table *table;
2620
2621 table = kmemdup(ipv6_route_table_template,
2622 sizeof(ipv6_route_table_template),
2623 GFP_KERNEL);
2624
2625 if (table) {
2626 table[0].data = &net->ipv6.sysctl.flush_delay;
2627 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2628 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2629 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2630 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2631 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2632 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2633 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2634 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2635 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2636 }
2637
2638 return table;
2639 }
2640 #endif
2641
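/*
 * Per-namespace initialisation: copy the dst_ops template, allocate the
 * special null/prohibit/blackhole route entries, set the default sysctl
 * values and create the /proc entries.
 */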
2642 static int __net_init ip6_route_net_init(struct net *net)
2643 {
2644 int ret = -ENOMEM;
2645
2646 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
2647 sizeof(net->ipv6.ip6_dst_ops));
2648
2649 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
2650 sizeof(*net->ipv6.ip6_null_entry),
2651 GFP_KERNEL);
2652 if (!net->ipv6.ip6_null_entry)
2653 goto out_ip6_dst_ops;
2654 net->ipv6.ip6_null_entry->dst.path =
2655 (struct dst_entry *)net->ipv6.ip6_null_entry;
2656 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2657
2658 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2659 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
2660 sizeof(*net->ipv6.ip6_prohibit_entry),
2661 GFP_KERNEL);
2662 if (!net->ipv6.ip6_prohibit_entry)
2663 goto out_ip6_null_entry;
2664 net->ipv6.ip6_prohibit_entry->dst.path =
2665 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2666 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2667
2668 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2669 sizeof(*net->ipv6.ip6_blk_hole_entry),
2670 GFP_KERNEL);
2671 if (!net->ipv6.ip6_blk_hole_entry)
2672 goto out_ip6_prohibit_entry;
2673 net->ipv6.ip6_blk_hole_entry->dst.path =
2674 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2675 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2676 #endif
2677
2678 net->ipv6.sysctl.flush_delay = 0;
2679 net->ipv6.sysctl.ip6_rt_max_size = 4096;
2680 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
2681 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
2682 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
2683 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
2684 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
2685 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
2686
2687 #ifdef CONFIG_PROC_FS
2688 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
2689 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2690 #endif
2691 net->ipv6.ip6_rt_gc_expire = 30*HZ;
2692
2693 ret = 0;
2694 out:
2695 return ret;
2696
2697 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2698 out_ip6_prohibit_entry:
2699 kfree(net->ipv6.ip6_prohibit_entry);
2700 out_ip6_null_entry:
2701 kfree(net->ipv6.ip6_null_entry);
2702 #endif
2703 out_ip6_dst_ops:
2704 goto out;
2705 }
2706
2707 static void __net_exit ip6_route_net_exit(struct net *net)
2708 {
2709 #ifdef CONFIG_PROC_FS
2710 proc_net_remove(net, "ipv6_route");
2711 proc_net_remove(net, "rt6_stats");
2712 #endif
2713 kfree(net->ipv6.ip6_null_entry);
2714 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2715 kfree(net->ipv6.ip6_prohibit_entry);
2716 kfree(net->ipv6.ip6_blk_hole_entry);
2717 #endif
2718 }
2719
2720 static struct pernet_operations ip6_route_net_ops = {
2721 .init = ip6_route_net_init,
2722 .exit = ip6_route_net_exit,
2723 };
2724
2725 static struct notifier_block ip6_route_dev_notifier = {
2726 .notifier_call = ip6_route_dev_notify,
2727 .priority = 0,
2728 };
2729
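/*
 * Subsystem init: create the rt6_info slab cache, register the per-net
 * operations, bring up fib6, xfrm6 and the policy-rule code, hook the
 * RTM_{NEW,DEL,GET}ROUTE handlers and the netdevice notifier.  Errors
 * unwind in reverse order via the labels below.
 */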
2730 int __init ip6_route_init(void)
2731 {
2732 int ret;
2733
2734 ret = -ENOMEM;
2735 ip6_dst_ops_template.kmem_cachep =
2736 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
2737 SLAB_HWCACHE_ALIGN, NULL);
2738 if (!ip6_dst_ops_template.kmem_cachep)
2739 goto out;
2740
2741 ret = register_pernet_subsys(&ip6_route_net_ops);
2742 if (ret)
2743 goto out_kmem_cache;
2744
2745 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
2746
2747 /* The loopback device is registered before this portion of code runs,
2748 * so the loopback reference in rt6_info is not taken there; take it
2749 * manually for init_net. */
2750 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
2751 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2752 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2753 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
2754 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2755 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
2756 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2757 #endif
2758 ret = fib6_init();
2759 if (ret)
2760 goto out_register_subsys;
2761
2762 ret = xfrm6_init();
2763 if (ret)
2764 goto out_fib6_init;
2765
2766 ret = fib6_rules_init();
2767 if (ret)
2768 goto xfrm6_init;
2769
2770 ret = -ENOBUFS;
2771 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL) ||
2772 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL) ||
2773 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL))
2774 goto fib6_rules_init;
2775
2776 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
2777 if (ret)
2778 goto fib6_rules_init;
2779
2780 out:
2781 return ret;
2782
2783 fib6_rules_init:
2784 fib6_rules_cleanup();
2785 xfrm6_init:
2786 xfrm6_fini();
2787 out_fib6_init:
2788 fib6_gc_cleanup();
2789 out_register_subsys:
2790 unregister_pernet_subsys(&ip6_route_net_ops);
2791 out_kmem_cache:
2792 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2793 goto out;
2794 }
2795
2796 void ip6_route_cleanup(void)
2797 {
2798 unregister_netdevice_notifier(&ip6_route_dev_notifier);
2799 fib6_rules_cleanup();
2800 xfrm6_fini();
2801 fib6_gc_cleanup();
2802 unregister_pernet_subsys(&ip6_route_net_ops);
2803 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
2804 }