net/ipv6/route.c
1 /*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14 /* Changes:
15 *
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
23 * Ville Nuorvala
24 * Fixed routing subtrees.
25 */
26
27 #include <linux/capability.h>
28 #include <linux/errno.h>
29 #include <linux/export.h>
30 #include <linux/types.h>
31 #include <linux/times.h>
32 #include <linux/socket.h>
33 #include <linux/sockios.h>
34 #include <linux/net.h>
35 #include <linux/route.h>
36 #include <linux/netdevice.h>
37 #include <linux/in6.h>
38 #include <linux/mroute6.h>
39 #include <linux/init.h>
40 #include <linux/if_arp.h>
41 #include <linux/proc_fs.h>
42 #include <linux/seq_file.h>
43 #include <linux/nsproxy.h>
44 #include <linux/slab.h>
45 #include <net/net_namespace.h>
46 #include <net/snmp.h>
47 #include <net/ipv6.h>
48 #include <net/ip6_fib.h>
49 #include <net/ip6_route.h>
50 #include <net/ndisc.h>
51 #include <net/addrconf.h>
52 #include <net/tcp.h>
53 #include <linux/rtnetlink.h>
54 #include <net/dst.h>
55 #include <net/xfrm.h>
56 #include <net/netevent.h>
57 #include <net/netlink.h>
58
59 #include <asm/uaccess.h>
60
61 #ifdef CONFIG_SYSCTL
62 #include <linux/sysctl.h>
63 #endif
64
65 static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
66 const struct in6_addr *dest);
67 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
68 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
69 static unsigned int ip6_mtu(const struct dst_entry *dst);
70 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
71 static void ip6_dst_destroy(struct dst_entry *);
72 static void ip6_dst_ifdown(struct dst_entry *,
73 struct net_device *dev, int how);
74 static int ip6_dst_gc(struct dst_ops *ops);
75
76 static int ip6_pkt_discard(struct sk_buff *skb);
77 static int ip6_pkt_discard_out(struct sk_buff *skb);
78 static void ip6_link_failure(struct sk_buff *skb);
79 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
80
81 #ifdef CONFIG_IPV6_ROUTE_INFO
82 static struct rt6_info *rt6_add_route_info(struct net *net,
83 const struct in6_addr *prefix, int prefixlen,
84 const struct in6_addr *gwaddr, int ifindex,
85 unsigned pref);
86 static struct rt6_info *rt6_get_route_info(struct net *net,
87 const struct in6_addr *prefix, int prefixlen,
88 const struct in6_addr *gwaddr, int ifindex);
89 #endif
90
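/*
 * Copy-on-write the dst metrics.  Only DST_HOST routes get writable
 * metrics, borrowed from their inet_peer: the read-only template
 * values are copied in on first use and the new pointer is installed
 * with cmpxchg().  If another CPU wins the race and installed a
 * read-only pointer, NULL is returned and the shared metrics stay.
 */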
91 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
92 {
93 struct rt6_info *rt = (struct rt6_info *) dst;
94 struct inet_peer *peer;
95 u32 *p = NULL;
96
97 if (!(rt->dst.flags & DST_HOST))
98 return NULL;
99
100 if (!rt->rt6i_peer)
101 rt6_bind_peer(rt, 1);
102
103 peer = rt->rt6i_peer;
104 if (peer) {
105 u32 *old_p = __DST_METRICS_PTR(old);
106 unsigned long prev, new;
107
108 p = peer->metrics;
109 if (inet_metrics_new(peer))
110 memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
111
112 new = (unsigned long) p;
113 prev = cmpxchg(&dst->_metrics, old, new);
114
115 if (prev != old) {
116 p = __DST_METRICS_PTR(prev);
117 if (prev & DST_METRICS_READ_ONLY)
118 p = NULL;
119 }
120 }
121 return p;
122 }
123
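/*
 * Find the ndisc neighbour entry for @daddr on the dst's device,
 * creating one if it does not exist yet.
 */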
124 static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr)
125 {
126 struct neighbour *n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr);
127 if (n)
128 return n;
129 return neigh_create(&nd_tbl, daddr, dst->dev);
130 }
131
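/*
 * Resolve the neighbour entry for the route's gateway and attach it
 * to the dst.  Returns 0 on success or a negative errno.
 */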
132 static int rt6_bind_neighbour(struct rt6_info *rt)
133 {
134 struct neighbour *n = ip6_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
135 if (IS_ERR(n))
136 return PTR_ERR(n);
137 dst_set_neighbour(&rt->dst, n);
138
139 return 0;
140 }
141
142 static struct dst_ops ip6_dst_ops_template = {
143 .family = AF_INET6,
144 .protocol = cpu_to_be16(ETH_P_IPV6),
145 .gc = ip6_dst_gc,
146 .gc_thresh = 1024,
147 .check = ip6_dst_check,
148 .default_advmss = ip6_default_advmss,
149 .mtu = ip6_mtu,
150 .cow_metrics = ipv6_cow_metrics,
151 .destroy = ip6_dst_destroy,
152 .ifdown = ip6_dst_ifdown,
153 .negative_advice = ip6_negative_advice,
154 .link_failure = ip6_link_failure,
155 .update_pmtu = ip6_rt_update_pmtu,
156 .local_out = __ip6_local_out,
157 .neigh_lookup = ip6_neigh_lookup,
158 };
159
160 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
161 {
162 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
163
164 return mtu ? : dst->dev->mtu;
165 }
166
167 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
168 {
169 }
170
171 static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
172 unsigned long old)
173 {
174 return NULL;
175 }
176
177 static struct dst_ops ip6_dst_blackhole_ops = {
178 .family = AF_INET6,
179 .protocol = cpu_to_be16(ETH_P_IPV6),
180 .destroy = ip6_dst_destroy,
181 .check = ip6_dst_check,
182 .mtu = ip6_blackhole_mtu,
183 .default_advmss = ip6_default_advmss,
184 .update_pmtu = ip6_rt_blackhole_update_pmtu,
185 .cow_metrics = ip6_rt_blackhole_cow_metrics,
186 .neigh_lookup = ip6_neigh_lookup,
187 };
188
189 static const u32 ip6_template_metrics[RTAX_MAX] = {
190 [RTAX_HOPLIMIT - 1] = 255,
191 };
192
193 static struct rt6_info ip6_null_entry_template = {
194 .dst = {
195 .__refcnt = ATOMIC_INIT(1),
196 .__use = 1,
197 .obsolete = -1,
198 .error = -ENETUNREACH,
199 .input = ip6_pkt_discard,
200 .output = ip6_pkt_discard_out,
201 },
202 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
203 .rt6i_protocol = RTPROT_KERNEL,
204 .rt6i_metric = ~(u32) 0,
205 .rt6i_ref = ATOMIC_INIT(1),
206 };
207
208 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
209
210 static int ip6_pkt_prohibit(struct sk_buff *skb);
211 static int ip6_pkt_prohibit_out(struct sk_buff *skb);
212
213 static struct rt6_info ip6_prohibit_entry_template = {
214 .dst = {
215 .__refcnt = ATOMIC_INIT(1),
216 .__use = 1,
217 .obsolete = -1,
218 .error = -EACCES,
219 .input = ip6_pkt_prohibit,
220 .output = ip6_pkt_prohibit_out,
221 },
222 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
223 .rt6i_protocol = RTPROT_KERNEL,
224 .rt6i_metric = ~(u32) 0,
225 .rt6i_ref = ATOMIC_INIT(1),
226 };
227
228 static struct rt6_info ip6_blk_hole_entry_template = {
229 .dst = {
230 .__refcnt = ATOMIC_INIT(1),
231 .__use = 1,
232 .obsolete = -1,
233 .error = -EINVAL,
234 .input = dst_discard,
235 .output = dst_discard,
236 },
237 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
238 .rt6i_protocol = RTPROT_KERNEL,
239 .rt6i_metric = ~(u32) 0,
240 .rt6i_ref = ATOMIC_INIT(1),
241 };
242
243 #endif
244
245 /* allocate dst with ip6_dst_ops */
246 static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
247 struct net_device *dev,
248 int flags)
249 {
250 struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
251
252 if (rt)
253 memset(&rt->rt6i_table, 0,
254 sizeof(*rt) - sizeof(struct dst_entry));
255
256 return rt;
257 }
258
259 static void ip6_dst_destroy(struct dst_entry *dst)
260 {
261 struct rt6_info *rt = (struct rt6_info *)dst;
262 struct inet6_dev *idev = rt->rt6i_idev;
263 struct inet_peer *peer = rt->rt6i_peer;
264
265 if (!(rt->dst.flags & DST_HOST))
266 dst_destroy_metrics_generic(dst);
267
268 if (idev) {
269 rt->rt6i_idev = NULL;
270 in6_dev_put(idev);
271 }
272 if (peer) {
273 rt->rt6i_peer = NULL;
274 inet_putpeer(peer);
275 }
276 }
277
278 static atomic_t __rt6_peer_genid = ATOMIC_INIT(0);
279
280 static u32 rt6_peer_genid(void)
281 {
282 return atomic_read(&__rt6_peer_genid);
283 }
284
285 void rt6_bind_peer(struct rt6_info *rt, int create)
286 {
287 struct inet_peer *peer;
288
289 peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
290 if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
291 inet_putpeer(peer);
292 else
293 rt->rt6i_peer_genid = rt6_peer_genid();
294 }
295
296 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
297 int how)
298 {
299 struct rt6_info *rt = (struct rt6_info *)dst;
300 struct inet6_dev *idev = rt->rt6i_idev;
301 struct net_device *loopback_dev =
302 dev_net(dev)->loopback_dev;
303
304 if (dev != loopback_dev && idev && idev->dev == dev) {
305 struct inet6_dev *loopback_idev =
306 in6_dev_get(loopback_dev);
307 if (loopback_idev) {
308 rt->rt6i_idev = loopback_idev;
309 in6_dev_put(idev);
310 }
311 }
312 }
313
314 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
315 {
316 return (rt->rt6i_flags & RTF_EXPIRES) &&
317 time_after(jiffies, rt->dst.expires);
318 }
319
320 static inline int rt6_need_strict(const struct in6_addr *daddr)
321 {
322 return ipv6_addr_type(daddr) &
323 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
324 }
325
326 /*
327 * Route lookup. Any table->tb6_lock is implied.
328 */
329
330 static inline struct rt6_info *rt6_device_match(struct net *net,
331 struct rt6_info *rt,
332 const struct in6_addr *saddr,
333 int oif,
334 int flags)
335 {
336 struct rt6_info *local = NULL;
337 struct rt6_info *sprt;
338
339 if (!oif && ipv6_addr_any(saddr))
340 goto out;
341
342 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
343 struct net_device *dev = sprt->dst.dev;
344
345 if (oif) {
346 if (dev->ifindex == oif)
347 return sprt;
348 if (dev->flags & IFF_LOOPBACK) {
349 if (!sprt->rt6i_idev ||
350 sprt->rt6i_idev->dev->ifindex != oif) {
351 if (flags & RT6_LOOKUP_F_IFACE && oif)
352 continue;
353 if (local && (!oif ||
354 local->rt6i_idev->dev->ifindex == oif))
355 continue;
356 }
357 local = sprt;
358 }
359 } else {
360 if (ipv6_chk_addr(net, saddr, dev,
361 flags & RT6_LOOKUP_F_IFACE))
362 return sprt;
363 }
364 }
365
366 if (oif) {
367 if (local)
368 return local;
369
370 if (flags & RT6_LOOKUP_F_IFACE)
371 return net->ipv6.ip6_null_entry;
372 }
373 out:
374 return rt;
375 }
376
377 #ifdef CONFIG_IPV6_ROUTER_PREF
378 static void rt6_probe(struct rt6_info *rt)
379 {
380 struct neighbour *neigh;
381 /*
382 * Okay, this does not seem to be appropriate
383 * for now, however, we need to check if it
384 * is really so; aka Router Reachability Probing.
385 *
386 * Router Reachability Probe MUST be rate-limited
387 * to no more than one per minute.
388 */
389 rcu_read_lock();
390 neigh = rt ? dst_get_neighbour_noref(&rt->dst) : NULL;
391 if (!neigh || (neigh->nud_state & NUD_VALID))
392 goto out;
393 read_lock_bh(&neigh->lock);
394 if (!(neigh->nud_state & NUD_VALID) &&
395 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
396 struct in6_addr mcaddr;
397 struct in6_addr *target;
398
399 neigh->updated = jiffies;
400 read_unlock_bh(&neigh->lock);
401
402 target = (struct in6_addr *)&neigh->primary_key;
403 addrconf_addr_solict_mult(target, &mcaddr);
404 ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
405 } else {
406 read_unlock_bh(&neigh->lock);
407 }
408 out:
409 rcu_read_unlock();
410 }
411 #else
412 static inline void rt6_probe(struct rt6_info *rt)
413 {
414 }
415 #endif
416
417 /*
418 * Default Router Selection (RFC 2461 6.3.6)
419 */
420 static inline int rt6_check_dev(struct rt6_info *rt, int oif)
421 {
422 struct net_device *dev = rt->dst.dev;
423 if (!oif || dev->ifindex == oif)
424 return 2;
425 if ((dev->flags & IFF_LOOPBACK) &&
426 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
427 return 1;
428 return 0;
429 }
430
431 static inline int rt6_check_neigh(struct rt6_info *rt)
432 {
433 struct neighbour *neigh;
434 int m;
435
436 rcu_read_lock();
437 neigh = dst_get_neighbour_noref(&rt->dst);
438 if (rt->rt6i_flags & RTF_NONEXTHOP ||
439 !(rt->rt6i_flags & RTF_GATEWAY))
440 m = 1;
441 else if (neigh) {
442 read_lock_bh(&neigh->lock);
443 if (neigh->nud_state & NUD_VALID)
444 m = 2;
445 #ifdef CONFIG_IPV6_ROUTER_PREF
446 else if (neigh->nud_state & NUD_FAILED)
447 m = 0;
448 #endif
449 else
450 m = 1;
451 read_unlock_bh(&neigh->lock);
452 } else
453 m = 0;
454 rcu_read_unlock();
455 return m;
456 }
457
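/*
 * Score a candidate route: the low bits carry the interface match
 * from rt6_check_dev(), and with CONFIG_IPV6_ROUTER_PREF the decoded
 * router preference is folded in above them.  Returns -1 when a
 * strict interface or reachability requirement is not satisfied.
 */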
458 static int rt6_score_route(struct rt6_info *rt, int oif,
459 int strict)
460 {
461 int m, n;
462
463 m = rt6_check_dev(rt, oif);
464 if (!m && (strict & RT6_LOOKUP_F_IFACE))
465 return -1;
466 #ifdef CONFIG_IPV6_ROUTER_PREF
467 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
468 #endif
469 n = rt6_check_neigh(rt);
470 if (!n && (strict & RT6_LOOKUP_F_REACHABLE))
471 return -1;
472 return m;
473 }
474
475 static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
476 int *mpri, struct rt6_info *match)
477 {
478 int m;
479
480 if (rt6_check_expired(rt))
481 goto out;
482
483 m = rt6_score_route(rt, oif, strict);
484 if (m < 0)
485 goto out;
486
487 if (m > *mpri) {
488 if (strict & RT6_LOOKUP_F_REACHABLE)
489 rt6_probe(match);
490 *mpri = m;
491 match = rt;
492 } else if (strict & RT6_LOOKUP_F_REACHABLE) {
493 rt6_probe(rt);
494 }
495
496 out:
497 return match;
498 }
499
500 static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
501 struct rt6_info *rr_head,
502 u32 metric, int oif, int strict)
503 {
504 struct rt6_info *rt, *match;
505 int mpri = -1;
506
507 match = NULL;
508 for (rt = rr_head; rt && rt->rt6i_metric == metric;
509 rt = rt->dst.rt6_next)
510 match = find_match(rt, oif, strict, &mpri, match);
511 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
512 rt = rt->dst.rt6_next)
513 match = find_match(rt, oif, strict, &mpri, match);
514
515 return match;
516 }
517
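/*
 * Default router selection: pick the best-scoring route among the
 * siblings sharing fn->rr_ptr's metric.  If nothing reachable matched
 * and RT6_LOOKUP_F_REACHABLE was requested, advance rr_ptr so the
 * next lookup round-robins to the following router.  Falls back to
 * ip6_null_entry when no route matches at all.
 */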
518 static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
519 {
520 struct rt6_info *match, *rt0;
521 struct net *net;
522
523 rt0 = fn->rr_ptr;
524 if (!rt0)
525 fn->rr_ptr = rt0 = fn->leaf;
526
527 match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
528
529 if (!match &&
530 (strict & RT6_LOOKUP_F_REACHABLE)) {
531 struct rt6_info *next = rt0->dst.rt6_next;
532
533 /* no entries matched; do round-robin */
534 if (!next || next->rt6i_metric != rt0->rt6i_metric)
535 next = fn->leaf;
536
537 if (next != rt0)
538 fn->rr_ptr = next;
539 }
540
541 net = dev_net(rt0->dst.dev);
542 return match ? match : net->ipv6.ip6_null_entry;
543 }
544
545 #ifdef CONFIG_IPV6_ROUTE_INFO
546 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
547 const struct in6_addr *gwaddr)
548 {
549 struct net *net = dev_net(dev);
550 struct route_info *rinfo = (struct route_info *) opt;
551 struct in6_addr prefix_buf, *prefix;
552 unsigned int pref;
553 unsigned long lifetime;
554 struct rt6_info *rt;
555
556 if (len < sizeof(struct route_info)) {
557 return -EINVAL;
558 }
559
560 /* Sanity check for prefix_len and length */
561 if (rinfo->length > 3) {
562 return -EINVAL;
563 } else if (rinfo->prefix_len > 128) {
564 return -EINVAL;
565 } else if (rinfo->prefix_len > 64) {
566 if (rinfo->length < 2) {
567 return -EINVAL;
568 }
569 } else if (rinfo->prefix_len > 0) {
570 if (rinfo->length < 1) {
571 return -EINVAL;
572 }
573 }
574
575 pref = rinfo->route_pref;
576 if (pref == ICMPV6_ROUTER_PREF_INVALID)
577 return -EINVAL;
578
579 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
580
581 if (rinfo->length == 3)
582 prefix = (struct in6_addr *)rinfo->prefix;
583 else {
584 /* this function is safe */
585 ipv6_addr_prefix(&prefix_buf,
586 (struct in6_addr *)rinfo->prefix,
587 rinfo->prefix_len);
588 prefix = &prefix_buf;
589 }
590
591 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
592 dev->ifindex);
593
594 if (rt && !lifetime) {
595 ip6_del_rt(rt);
596 rt = NULL;
597 }
598
599 if (!rt && lifetime)
600 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
601 pref);
602 else if (rt)
603 rt->rt6i_flags = RTF_ROUTEINFO |
604 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
605
606 if (rt) {
607 if (!addrconf_finite_timeout(lifetime)) {
608 rt->rt6i_flags &= ~RTF_EXPIRES;
609 } else {
610 rt->dst.expires = jiffies + HZ * lifetime;
611 rt->rt6i_flags |= RTF_EXPIRES;
612 }
613 dst_release(&rt->dst);
614 }
615 return 0;
616 }
617 #endif
618
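/*
 * BACKTRACK: if the lookup ended on ip6_null_entry, walk back up the
 * fib6 tree.  When the parent owns a source-address subtree we did
 * not come from, re-descend into it with fib6_lookup(); otherwise
 * move to the parent.  Retry ("restart") at the first node carrying
 * route info, or give up ("out") at the tree root.
 */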
619 #define BACKTRACK(__net, saddr) \
620 do { \
621 if (rt == __net->ipv6.ip6_null_entry) { \
622 struct fib6_node *pn; \
623 while (1) { \
624 if (fn->fn_flags & RTN_TL_ROOT) \
625 goto out; \
626 pn = fn->parent; \
627 if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \
628 fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \
629 else \
630 fn = pn; \
631 if (fn->fn_flags & RTN_RTINFO) \
632 goto restart; \
633 } \
634 } \
635 } while (0)
636
637 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
638 struct fib6_table *table,
639 struct flowi6 *fl6, int flags)
640 {
641 struct fib6_node *fn;
642 struct rt6_info *rt;
643
644 read_lock_bh(&table->tb6_lock);
645 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
646 restart:
647 rt = fn->leaf;
648 rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
649 BACKTRACK(net, &fl6->saddr);
650 out:
651 dst_use(&rt->dst, jiffies);
652 read_unlock_bh(&table->tb6_lock);
653 return rt;
654
655 }
656
657 struct dst_entry * ip6_route_lookup(struct net *net, struct flowi6 *fl6,
658 int flags)
659 {
660 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
661 }
662 EXPORT_SYMBOL_GPL(ip6_route_lookup);
663
664 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
665 const struct in6_addr *saddr, int oif, int strict)
666 {
667 struct flowi6 fl6 = {
668 .flowi6_oif = oif,
669 .daddr = *daddr,
670 };
671 struct dst_entry *dst;
672 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
673
674 if (saddr) {
675 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
676 flags |= RT6_LOOKUP_F_HAS_SADDR;
677 }
678
679 dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
680 if (dst->error == 0)
681 return (struct rt6_info *) dst;
682
683 dst_release(dst);
684
685 return NULL;
686 }
687
688 EXPORT_SYMBOL(rt6_lookup);
689
690 /* ip6_ins_rt is called with FREE table->tb6_lock.
691 It takes a new route entry; if the addition fails for any reason the
692 route is freed. In any case, if the caller does not hold a reference,
693 it may be destroyed.
694 */
695
696 static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
697 {
698 int err;
699 struct fib6_table *table;
700
701 table = rt->rt6i_table;
702 write_lock_bh(&table->tb6_lock);
703 err = fib6_add(&table->tb6_root, rt, info);
704 write_unlock_bh(&table->tb6_lock);
705
706 return err;
707 }
708
709 int ip6_ins_rt(struct rt6_info *rt)
710 {
711 struct nl_info info = {
712 .nl_net = dev_net(rt->dst.dev),
713 };
714 return __ip6_ins_rt(rt, &info);
715 }
716
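/*
 * Create an RTF_CACHE host clone of @ort for @daddr (and @saddr when
 * source-routing subtrees are enabled).  If binding the neighbour
 * fails because the neighbour table is full, dst garbage collection
 * is forced once with relaxed limits and the bind is retried (only
 * outside softirq context); otherwise the clone is freed.
 */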
717 static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
718 const struct in6_addr *daddr,
719 const struct in6_addr *saddr)
720 {
721 struct rt6_info *rt;
722
723 /*
724 * Clone the route.
725 */
726
727 rt = ip6_rt_copy(ort, daddr);
728
729 if (rt) {
730 int attempts = !in_softirq();
731
732 if (!(rt->rt6i_flags & RTF_GATEWAY)) {
733 if (ort->rt6i_dst.plen != 128 &&
734 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
735 rt->rt6i_flags |= RTF_ANYCAST;
736 rt->rt6i_gateway = *daddr;
737 }
738
739 rt->rt6i_flags |= RTF_CACHE;
740
741 #ifdef CONFIG_IPV6_SUBTREES
742 if (rt->rt6i_src.plen && saddr) {
743 rt->rt6i_src.addr = *saddr;
744 rt->rt6i_src.plen = 128;
745 }
746 #endif
747
748 retry:
749 if (rt6_bind_neighbour(rt)) {
750 struct net *net = dev_net(rt->dst.dev);
751 int saved_rt_min_interval =
752 net->ipv6.sysctl.ip6_rt_gc_min_interval;
753 int saved_rt_elasticity =
754 net->ipv6.sysctl.ip6_rt_gc_elasticity;
755
756 if (attempts-- > 0) {
757 net->ipv6.sysctl.ip6_rt_gc_elasticity = 1;
758 net->ipv6.sysctl.ip6_rt_gc_min_interval = 0;
759
760 ip6_dst_gc(&net->ipv6.ip6_dst_ops);
761
762 net->ipv6.sysctl.ip6_rt_gc_elasticity =
763 saved_rt_elasticity;
764 net->ipv6.sysctl.ip6_rt_gc_min_interval =
765 saved_rt_min_interval;
766 goto retry;
767 }
768
769 if (net_ratelimit())
770 printk(KERN_WARNING
771 "ipv6: Neighbour table overflow.\n");
772 dst_free(&rt->dst);
773 return NULL;
774 }
775 }
776
777 return rt;
778 }
779
780 static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
781 const struct in6_addr *daddr)
782 {
783 struct rt6_info *rt = ip6_rt_copy(ort, daddr);
784
785 if (rt) {
786 rt->rt6i_flags |= RTF_CACHE;
787 dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_noref_raw(&ort->dst)));
788 }
789 return rt;
790 }
791
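/*
 * Core routing resolver shared by the input and output paths: select
 * a route under tb6_lock, backtracking via BACKTRACK() on failure.
 * Routes that are not already RTF_CACHE entries are cloned per
 * destination (rt6_alloc_cow() when a neighbour must be bound,
 * rt6_alloc_clone() for non-host routes) and inserted, retrying a
 * few times if a concurrent insert wins the race.  If nothing
 * reachable is found, the lookup is redone without the reachability
 * requirement.
 */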
792 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
793 struct flowi6 *fl6, int flags)
794 {
795 struct fib6_node *fn;
796 struct rt6_info *rt, *nrt;
797 int strict = 0;
798 int attempts = 3;
799 int err;
800 int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE;
801
802 strict |= flags & RT6_LOOKUP_F_IFACE;
803
804 relookup:
805 read_lock_bh(&table->tb6_lock);
806
807 restart_2:
808 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
809
810 restart:
811 rt = rt6_select(fn, oif, strict | reachable);
812
813 BACKTRACK(net, &fl6->saddr);
814 if (rt == net->ipv6.ip6_null_entry ||
815 rt->rt6i_flags & RTF_CACHE)
816 goto out;
817
818 dst_hold(&rt->dst);
819 read_unlock_bh(&table->tb6_lock);
820
821 if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
822 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
823 else if (!(rt->dst.flags & DST_HOST))
824 nrt = rt6_alloc_clone(rt, &fl6->daddr);
825 else
826 goto out2;
827
828 dst_release(&rt->dst);
829 rt = nrt ? : net->ipv6.ip6_null_entry;
830
831 dst_hold(&rt->dst);
832 if (nrt) {
833 err = ip6_ins_rt(nrt);
834 if (!err)
835 goto out2;
836 }
837
838 if (--attempts <= 0)
839 goto out2;
840
841 /*
842 * Race condition! In the gap, when table->tb6_lock was
843 * released, someone else could have inserted this route. Relookup.
844 */
845 dst_release(&rt->dst);
846 goto relookup;
847
848 out:
849 if (reachable) {
850 reachable = 0;
851 goto restart_2;
852 }
853 dst_hold(&rt->dst);
854 read_unlock_bh(&table->tb6_lock);
855 out2:
856 rt->dst.lastuse = jiffies;
857 rt->dst.__use++;
858
859 return rt;
860 }
861
862 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
863 struct flowi6 *fl6, int flags)
864 {
865 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
866 }
867
868 void ip6_route_input(struct sk_buff *skb)
869 {
870 const struct ipv6hdr *iph = ipv6_hdr(skb);
871 struct net *net = dev_net(skb->dev);
872 int flags = RT6_LOOKUP_F_HAS_SADDR;
873 struct flowi6 fl6 = {
874 .flowi6_iif = skb->dev->ifindex,
875 .daddr = iph->daddr,
876 .saddr = iph->saddr,
877 .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
878 .flowi6_mark = skb->mark,
879 .flowi6_proto = iph->nexthdr,
880 };
881
882 if (rt6_need_strict(&iph->daddr) && skb->dev->type != ARPHRD_PIMREG)
883 flags |= RT6_LOOKUP_F_IFACE;
884
885 skb_dst_set(skb, fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_input));
886 }
887
888 static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
889 struct flowi6 *fl6, int flags)
890 {
891 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
892 }
893
894 struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
895 struct flowi6 *fl6)
896 {
897 int flags = 0;
898
899 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
900 flags |= RT6_LOOKUP_F_IFACE;
901
902 if (!ipv6_addr_any(&fl6->saddr))
903 flags |= RT6_LOOKUP_F_HAS_SADDR;
904 else if (sk)
905 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
906
907 return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
908 }
909
910 EXPORT_SYMBOL(ip6_route_output);
911
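/*
 * Replace @dst_orig with a "blackhole" copy whose input and output
 * handlers silently discard packets.  Metrics, device and routing
 * keys are copied from the original, which is released before the
 * new dst (or ERR_PTR(-ENOMEM)) is returned.
 */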
912 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
913 {
914 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
915 struct dst_entry *new = NULL;
916
917 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0);
918 if (rt) {
919 memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
920
921 new = &rt->dst;
922
923 new->__use = 1;
924 new->input = dst_discard;
925 new->output = dst_discard;
926
927 if (dst_metrics_read_only(&ort->dst))
928 new->_metrics = ort->dst._metrics;
929 else
930 dst_copy_metrics(new, &ort->dst);
931 rt->rt6i_idev = ort->rt6i_idev;
932 if (rt->rt6i_idev)
933 in6_dev_hold(rt->rt6i_idev);
934 rt->dst.expires = 0;
935
936 rt->rt6i_gateway = ort->rt6i_gateway;
937 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
938 rt->rt6i_metric = 0;
939
940 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
941 #ifdef CONFIG_IPV6_SUBTREES
942 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
943 #endif
944
945 dst_free(new);
946 }
947
948 dst_release(dst_orig);
949 return new ? new : ERR_PTR(-ENOMEM);
950 }
951
952 /*
953 * Destination cache support functions
954 */
955
956 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
957 {
958 struct rt6_info *rt;
959
960 rt = (struct rt6_info *) dst;
961
962 if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
963 if (rt->rt6i_peer_genid != rt6_peer_genid()) {
964 if (!rt->rt6i_peer)
965 rt6_bind_peer(rt, 0);
966 rt->rt6i_peer_genid = rt6_peer_genid();
967 }
968 return dst;
969 }
970 return NULL;
971 }
972
973 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
974 {
975 struct rt6_info *rt = (struct rt6_info *) dst;
976
977 if (rt) {
978 if (rt->rt6i_flags & RTF_CACHE) {
979 if (rt6_check_expired(rt)) {
980 ip6_del_rt(rt);
981 dst = NULL;
982 }
983 } else {
984 dst_release(dst);
985 dst = NULL;
986 }
987 }
988 return dst;
989 }
990
991 static void ip6_link_failure(struct sk_buff *skb)
992 {
993 struct rt6_info *rt;
994
995 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
996
997 rt = (struct rt6_info *) skb_dst(skb);
998 if (rt) {
999 if (rt->rt6i_flags & RTF_CACHE) {
1000 dst_set_expires(&rt->dst, 0);
1001 rt->rt6i_flags |= RTF_EXPIRES;
1002 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
1003 rt->rt6i_node->fn_sernum = -1;
1004 }
1005 }
1006
1007 static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1008 {
1009 struct rt6_info *rt6 = (struct rt6_info*)dst;
1010
1011 if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
1012 rt6->rt6i_flags |= RTF_MODIFIED;
1013 if (mtu < IPV6_MIN_MTU) {
1014 u32 features = dst_metric(dst, RTAX_FEATURES);
1015 mtu = IPV6_MIN_MTU;
1016 features |= RTAX_FEATURE_ALLFRAG;
1017 dst_metric_set(dst, RTAX_FEATURES, features);
1018 }
1019 dst_metric_set(dst, RTAX_MTU, mtu);
1020 }
1021 }
1022
1023 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
1024 {
1025 struct net_device *dev = dst->dev;
1026 unsigned int mtu = dst_mtu(dst);
1027 struct net *net = dev_net(dev);
1028
1029 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
1030
1031 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
1032 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
1033
1034 /*
1035 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1036 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1037 * IPV6_MAXPLEN is also valid and means: "any MSS,
1038 * rely only on pmtu discovery"
1039 */
1040 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
1041 mtu = IPV6_MAXPLEN;
1042 return mtu;
1043 }
1044
1045 static unsigned int ip6_mtu(const struct dst_entry *dst)
1046 {
1047 struct inet6_dev *idev;
1048 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
1049
1050 if (mtu)
1051 return mtu;
1052
1053 mtu = IPV6_MIN_MTU;
1054
1055 rcu_read_lock();
1056 idev = __in6_dev_get(dst->dev);
1057 if (idev)
1058 mtu = idev->cnf.mtu6;
1059 rcu_read_unlock();
1060
1061 return mtu;
1062 }
1063
1064 static struct dst_entry *icmp6_dst_gc_list;
1065 static DEFINE_SPINLOCK(icmp6_dst_lock);
1066
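/*
 * Allocate a standalone host route to @fl6->daddr on @dev without
 * inserting it into the FIB.  The dst is chained on
 * icmp6_dst_gc_list and reclaimed by icmp6_dst_gc() once its
 * refcount drops to zero.
 */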
1067 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1068 struct neighbour *neigh,
1069 struct flowi6 *fl6)
1070 {
1071 struct dst_entry *dst;
1072 struct rt6_info *rt;
1073 struct inet6_dev *idev = in6_dev_get(dev);
1074 struct net *net = dev_net(dev);
1075
1076 if (unlikely(!idev))
1077 return NULL;
1078
1079 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
1080 if (unlikely(!rt)) {
1081 in6_dev_put(idev);
1082 dst = ERR_PTR(-ENOMEM);
1083 goto out;
1084 }
1085
1086 if (neigh)
1087 neigh_hold(neigh);
1088 else {
1089 neigh = ip6_neigh_lookup(&rt->dst, &fl6->daddr);
1090 if (IS_ERR(neigh)) {
1091 dst_free(&rt->dst);
1092 return ERR_CAST(neigh);
1093 }
1094 }
1095
1096 rt->dst.flags |= DST_HOST;
1097 rt->dst.output = ip6_output;
1098 dst_set_neighbour(&rt->dst, neigh);
1099 atomic_set(&rt->dst.__refcnt, 1);
1100 rt->rt6i_dst.addr = fl6->daddr;
1101 rt->rt6i_dst.plen = 128;
1102 rt->rt6i_idev = idev;
1103 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
1104
1105 spin_lock_bh(&icmp6_dst_lock);
1106 rt->dst.next = icmp6_dst_gc_list;
1107 icmp6_dst_gc_list = &rt->dst;
1108 spin_unlock_bh(&icmp6_dst_lock);
1109
1110 fib6_force_start_gc(net);
1111
1112 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
1113
1114 out:
1115 return dst;
1116 }
1117
1118 int icmp6_dst_gc(void)
1119 {
1120 struct dst_entry *dst, **pprev;
1121 int more = 0;
1122
1123 spin_lock_bh(&icmp6_dst_lock);
1124 pprev = &icmp6_dst_gc_list;
1125
1126 while ((dst = *pprev) != NULL) {
1127 if (!atomic_read(&dst->__refcnt)) {
1128 *pprev = dst->next;
1129 dst_free(dst);
1130 } else {
1131 pprev = &dst->next;
1132 ++more;
1133 }
1134 }
1135
1136 spin_unlock_bh(&icmp6_dst_lock);
1137
1138 return more;
1139 }
1140
1141 static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1142 void *arg)
1143 {
1144 struct dst_entry *dst, **pprev;
1145
1146 spin_lock_bh(&icmp6_dst_lock);
1147 pprev = &icmp6_dst_gc_list;
1148 while ((dst = *pprev) != NULL) {
1149 struct rt6_info *rt = (struct rt6_info *) dst;
1150 if (func(rt, arg)) {
1151 *pprev = dst->next;
1152 dst_free(dst);
1153 } else {
1154 pprev = &dst->next;
1155 }
1156 }
1157 spin_unlock_bh(&icmp6_dst_lock);
1158 }
1159
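/*
 * dst garbage collection: skip the run when the previous one was
 * less than ip6_rt_gc_min_interval ago and the table still holds no
 * more than ip6_rt_max_size entries.  The fib6 GC timeout
 * (ip6_rt_gc_expire) grows on every forced run, is reset to half of
 * ip6_rt_gc_timeout once the table shrinks below gc_thresh, and
 * decays by the elasticity factor on each call.
 */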
1160 static int ip6_dst_gc(struct dst_ops *ops)
1161 {
1162 unsigned long now = jiffies;
1163 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
1164 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
1165 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
1166 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
1167 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
1168 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
1169 int entries;
1170
1171 entries = dst_entries_get_fast(ops);
1172 if (time_after(rt_last_gc + rt_min_interval, now) &&
1173 entries <= rt_max_size)
1174 goto out;
1175
1176 net->ipv6.ip6_rt_gc_expire++;
1177 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
1178 net->ipv6.ip6_rt_last_gc = now;
1179 entries = dst_entries_get_slow(ops);
1180 if (entries < ops->gc_thresh)
1181 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
1182 out:
1183 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
1184 return entries > rt_max_size;
1185 }
1186
1187 /* Clean host part of a prefix. Not necessary in radix tree,
1188 but results in cleaner routing tables.
1189
1190 Remove it only when everything works!
1191 */
1192
1193 int ip6_dst_hoplimit(struct dst_entry *dst)
1194 {
1195 int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
1196 if (hoplimit == 0) {
1197 struct net_device *dev = dst->dev;
1198 struct inet6_dev *idev;
1199
1200 rcu_read_lock();
1201 idev = __in6_dev_get(dev);
1202 if (idev)
1203 hoplimit = idev->cnf.hop_limit;
1204 else
1205 hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
1206 rcu_read_unlock();
1207 }
1208 return hoplimit;
1209 }
1210 EXPORT_SYMBOL(ip6_dst_hoplimit);
1211
1212 /*
1213 * Add a route described by a fib6_config.
1214 */
1215
1216 int ip6_route_add(struct fib6_config *cfg)
1217 {
1218 int err;
1219 struct net *net = cfg->fc_nlinfo.nl_net;
1220 struct rt6_info *rt = NULL;
1221 struct net_device *dev = NULL;
1222 struct inet6_dev *idev = NULL;
1223 struct fib6_table *table;
1224 int addr_type;
1225
1226 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
1227 return -EINVAL;
1228 #ifndef CONFIG_IPV6_SUBTREES
1229 if (cfg->fc_src_len)
1230 return -EINVAL;
1231 #endif
1232 if (cfg->fc_ifindex) {
1233 err = -ENODEV;
1234 dev = dev_get_by_index(net, cfg->fc_ifindex);
1235 if (!dev)
1236 goto out;
1237 idev = in6_dev_get(dev);
1238 if (!idev)
1239 goto out;
1240 }
1241
1242 if (cfg->fc_metric == 0)
1243 cfg->fc_metric = IP6_RT_PRIO_USER;
1244
1245 err = -ENOBUFS;
1246 if (cfg->fc_nlinfo.nlh &&
1247 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
1248 table = fib6_get_table(net, cfg->fc_table);
1249 if (!table) {
1250 printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n");
1251 table = fib6_new_table(net, cfg->fc_table);
1252 }
1253 } else {
1254 table = fib6_new_table(net, cfg->fc_table);
1255 }
1256
1257 if (!table)
1258 goto out;
1259
1260 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
1261
1262 if (!rt) {
1263 err = -ENOMEM;
1264 goto out;
1265 }
1266
1267 rt->dst.obsolete = -1;
1268 rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ?
1269 jiffies + clock_t_to_jiffies(cfg->fc_expires) :
1270 0;
1271
1272 if (cfg->fc_protocol == RTPROT_UNSPEC)
1273 cfg->fc_protocol = RTPROT_BOOT;
1274 rt->rt6i_protocol = cfg->fc_protocol;
1275
1276 addr_type = ipv6_addr_type(&cfg->fc_dst);
1277
1278 if (addr_type & IPV6_ADDR_MULTICAST)
1279 rt->dst.input = ip6_mc_input;
1280 else if (cfg->fc_flags & RTF_LOCAL)
1281 rt->dst.input = ip6_input;
1282 else
1283 rt->dst.input = ip6_forward;
1284
1285 rt->dst.output = ip6_output;
1286
1287 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1288 rt->rt6i_dst.plen = cfg->fc_dst_len;
1289 if (rt->rt6i_dst.plen == 128)
1290 rt->dst.flags |= DST_HOST;
1291
1292 if (!(rt->dst.flags & DST_HOST) && cfg->fc_mx) {
1293 u32 *metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
1294 if (!metrics) {
1295 err = -ENOMEM;
1296 goto out;
1297 }
1298 dst_init_metrics(&rt->dst, metrics, 0);
1299 }
1300 #ifdef CONFIG_IPV6_SUBTREES
1301 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
1302 rt->rt6i_src.plen = cfg->fc_src_len;
1303 #endif
1304
1305 rt->rt6i_metric = cfg->fc_metric;
1306
1307 /* We cannot add true routes via loopback here,
1308 they would result in kernel looping; promote them to reject routes
1309 */
1310 if ((cfg->fc_flags & RTF_REJECT) ||
1311 (dev && (dev->flags & IFF_LOOPBACK) &&
1312 !(addr_type & IPV6_ADDR_LOOPBACK) &&
1313 !(cfg->fc_flags & RTF_LOCAL))) {
1314 /* hold loopback dev/idev if we haven't done so. */
1315 if (dev != net->loopback_dev) {
1316 if (dev) {
1317 dev_put(dev);
1318 in6_dev_put(idev);
1319 }
1320 dev = net->loopback_dev;
1321 dev_hold(dev);
1322 idev = in6_dev_get(dev);
1323 if (!idev) {
1324 err = -ENODEV;
1325 goto out;
1326 }
1327 }
1328 rt->dst.output = ip6_pkt_discard_out;
1329 rt->dst.input = ip6_pkt_discard;
1330 rt->dst.error = -ENETUNREACH;
1331 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1332 goto install_route;
1333 }
1334
1335 if (cfg->fc_flags & RTF_GATEWAY) {
1336 const struct in6_addr *gw_addr;
1337 int gwa_type;
1338
1339 gw_addr = &cfg->fc_gateway;
1340 rt->rt6i_gateway = *gw_addr;
1341 gwa_type = ipv6_addr_type(gw_addr);
1342
1343 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1344 struct rt6_info *grt;
1345
1346 /* IPv6 strictly forbids using non-link-local
1347 addresses as the nexthop address.
1348 Otherwise, the router will not be able to send redirects.
1349 That is very good, but in some (rare!) circumstances
1350 (SIT, PtP, NBMA NOARP links) it is handy to allow
1351 some exceptions. --ANK
1352 */
1353 err = -EINVAL;
1354 if (!(gwa_type & IPV6_ADDR_UNICAST))
1355 goto out;
1356
1357 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);
1358
1359 err = -EHOSTUNREACH;
1360 if (!grt)
1361 goto out;
1362 if (dev) {
1363 if (dev != grt->dst.dev) {
1364 dst_release(&grt->dst);
1365 goto out;
1366 }
1367 } else {
1368 dev = grt->dst.dev;
1369 idev = grt->rt6i_idev;
1370 dev_hold(dev);
1371 in6_dev_hold(grt->rt6i_idev);
1372 }
1373 if (!(grt->rt6i_flags & RTF_GATEWAY))
1374 err = 0;
1375 dst_release(&grt->dst);
1376
1377 if (err)
1378 goto out;
1379 }
1380 err = -EINVAL;
1381 if (!dev || (dev->flags & IFF_LOOPBACK))
1382 goto out;
1383 }
1384
1385 err = -ENODEV;
1386 if (!dev)
1387 goto out;
1388
1389 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1390 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1391 err = -EINVAL;
1392 goto out;
1393 }
1394 rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
1395 rt->rt6i_prefsrc.plen = 128;
1396 } else
1397 rt->rt6i_prefsrc.plen = 0;
1398
1399 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1400 err = rt6_bind_neighbour(rt);
1401 if (err)
1402 goto out;
1403 }
1404
1405 rt->rt6i_flags = cfg->fc_flags;
1406
1407 install_route:
1408 if (cfg->fc_mx) {
1409 struct nlattr *nla;
1410 int remaining;
1411
1412 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
1413 int type = nla_type(nla);
1414
1415 if (type) {
1416 if (type > RTAX_MAX) {
1417 err = -EINVAL;
1418 goto out;
1419 }
1420
1421 dst_metric_set(&rt->dst, type, nla_get_u32(nla));
1422 }
1423 }
1424 }
1425
1426 rt->dst.dev = dev;
1427 rt->rt6i_idev = idev;
1428 rt->rt6i_table = table;
1429
1430 cfg->fc_nlinfo.nl_net = dev_net(dev);
1431
1432 return __ip6_ins_rt(rt, &cfg->fc_nlinfo);
1433
1434 out:
1435 if (dev)
1436 dev_put(dev);
1437 if (idev)
1438 in6_dev_put(idev);
1439 if (rt)
1440 dst_free(&rt->dst);
1441 return err;
1442 }
1443
1444 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1445 {
1446 int err;
1447 struct fib6_table *table;
1448 struct net *net = dev_net(rt->dst.dev);
1449
1450 if (rt == net->ipv6.ip6_null_entry)
1451 return -ENOENT;
1452
1453 table = rt->rt6i_table;
1454 write_lock_bh(&table->tb6_lock);
1455
1456 err = fib6_del(rt, info);
1457 dst_release(&rt->dst);
1458
1459 write_unlock_bh(&table->tb6_lock);
1460
1461 return err;
1462 }
1463
1464 int ip6_del_rt(struct rt6_info *rt)
1465 {
1466 struct nl_info info = {
1467 .nl_net = dev_net(rt->dst.dev),
1468 };
1469 return __ip6_del_rt(rt, &info);
1470 }
1471
1472 static int ip6_route_del(struct fib6_config *cfg)
1473 {
1474 struct fib6_table *table;
1475 struct fib6_node *fn;
1476 struct rt6_info *rt;
1477 int err = -ESRCH;
1478
1479 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
1480 if (!table)
1481 return err;
1482
1483 read_lock_bh(&table->tb6_lock);
1484
1485 fn = fib6_locate(&table->tb6_root,
1486 &cfg->fc_dst, cfg->fc_dst_len,
1487 &cfg->fc_src, cfg->fc_src_len);
1488
1489 if (fn) {
1490 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1491 if (cfg->fc_ifindex &&
1492 (!rt->dst.dev ||
1493 rt->dst.dev->ifindex != cfg->fc_ifindex))
1494 continue;
1495 if (cfg->fc_flags & RTF_GATEWAY &&
1496 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
1497 continue;
1498 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1499 continue;
1500 dst_hold(&rt->dst);
1501 read_unlock_bh(&table->tb6_lock);
1502
1503 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
1504 }
1505 }
1506 read_unlock_bh(&table->tb6_lock);
1507
1508 return err;
1509 }
1510
1511 /*
1512 * Handle redirects
1513 */
1514 struct ip6rd_flowi {
1515 struct flowi6 fl6;
1516 struct in6_addr gateway;
1517 };
1518
1519 static struct rt6_info *__ip6_route_redirect(struct net *net,
1520 struct fib6_table *table,
1521 struct flowi6 *fl6,
1522 int flags)
1523 {
1524 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
1525 struct rt6_info *rt;
1526 struct fib6_node *fn;
1527
1528 /*
1529 * Get the "current" route for this destination and
1530 * check if the redirect has come from the appropriate router.
1531 *
1532 * RFC 2461 specifies that redirects should only be
1533 * accepted if they come from the nexthop to the target.
1534 * Due to the way the routes are chosen, this notion
1535 * is a bit fuzzy and one might need to check all possible
1536 * routes.
1537 */
1538
1539 read_lock_bh(&table->tb6_lock);
1540 fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1541 restart:
1542 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1543 /*
1544 * Current route is on-link; redirect is always invalid.
1545 *
1546 * Seems the previous statement is not true. It could
1547 * be a node which sees us as on-link (e.g. proxy ndisc),
1548 * but then the router serving it might decide that we should
1549 * know the truth 8)8) --ANK (980726).
1550 */
1551 if (rt6_check_expired(rt))
1552 continue;
1553 if (!(rt->rt6i_flags & RTF_GATEWAY))
1554 continue;
1555 if (fl6->flowi6_oif != rt->dst.dev->ifindex)
1556 continue;
1557 if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
1558 continue;
1559 break;
1560 }
1561
1562 if (!rt)
1563 rt = net->ipv6.ip6_null_entry;
1564 BACKTRACK(net, &fl6->saddr);
1565 out:
1566 dst_hold(&rt->dst);
1567
1568 read_unlock_bh(&table->tb6_lock);
1569
1570 return rt;
1571 };
1572
1573 static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
1574 const struct in6_addr *src,
1575 const struct in6_addr *gateway,
1576 struct net_device *dev)
1577 {
1578 int flags = RT6_LOOKUP_F_HAS_SADDR;
1579 struct net *net = dev_net(dev);
1580 struct ip6rd_flowi rdfl = {
1581 .fl6 = {
1582 .flowi6_oif = dev->ifindex,
1583 .daddr = *dest,
1584 .saddr = *src,
1585 },
1586 };
1587
1588 rdfl.gateway = *gateway;
1589
1590 if (rt6_need_strict(dest))
1591 flags |= RT6_LOOKUP_F_IFACE;
1592
1593 return (struct rt6_info *)fib6_rule_lookup(net, &rdfl.fl6,
1594 flags, __ip6_route_redirect);
1595 }
1596
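/*
 * Process an ICMPv6 redirect for @dest received from @neigh: verify
 * that the sender is the current next hop, update the neighbour
 * cache with the advertised link-layer address, and install an
 * RTF_DYNAMIC|RTF_CACHE host route through the new gateway,
 * replacing a previously cached entry if there was one.
 */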
1597 void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1598 const struct in6_addr *saddr,
1599 struct neighbour *neigh, u8 *lladdr, int on_link)
1600 {
1601 struct rt6_info *rt, *nrt = NULL;
1602 struct netevent_redirect netevent;
1603 struct net *net = dev_net(neigh->dev);
1604
1605 rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
1606
1607 if (rt == net->ipv6.ip6_null_entry) {
1608 if (net_ratelimit())
1609 printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
1610 "for redirect target\n");
1611 goto out;
1612 }
1613
1614 /*
1615 * We have finally decided to accept it.
1616 */
1617
1618 neigh_update(neigh, lladdr, NUD_STALE,
1619 NEIGH_UPDATE_F_WEAK_OVERRIDE|
1620 NEIGH_UPDATE_F_OVERRIDE|
1621 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
1622 NEIGH_UPDATE_F_ISROUTER))
1623 );
1624
1625 /*
1626 * Redirect received -> path was valid.
1627 * Look, redirects are sent only in response to data packets,
1628 * so that this nexthop apparently is reachable. --ANK
1629 */
1630 dst_confirm(&rt->dst);
1631
1632 /* Duplicate redirect: silently ignore. */
1633 if (neigh == dst_get_neighbour_noref_raw(&rt->dst))
1634 goto out;
1635
1636 nrt = ip6_rt_copy(rt, dest);
1637 if (!nrt)
1638 goto out;
1639
1640 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
1641 if (on_link)
1642 nrt->rt6i_flags &= ~RTF_GATEWAY;
1643
1644 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
1645 dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
1646
1647 if (ip6_ins_rt(nrt))
1648 goto out;
1649
1650 netevent.old = &rt->dst;
1651 netevent.new = &nrt->dst;
1652 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1653
1654 if (rt->rt6i_flags & RTF_CACHE) {
1655 ip6_del_rt(rt);
1656 return;
1657 }
1658
1659 out:
1660 dst_release(&rt->dst);
1661 }
1662
1663 /*
1664 * Handle ICMP "packet too big" messages
1665 * i.e. Path MTU discovery
1666 */
1667
1668 static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr,
1669 struct net *net, u32 pmtu, int ifindex)
1670 {
1671 struct rt6_info *rt, *nrt;
1672 int allfrag = 0;
1673 again:
1674 rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
1675 if (!rt)
1676 return;
1677
1678 if (rt6_check_expired(rt)) {
1679 ip6_del_rt(rt);
1680 goto again;
1681 }
1682
1683 if (pmtu >= dst_mtu(&rt->dst))
1684 goto out;
1685
1686 if (pmtu < IPV6_MIN_MTU) {
1687 /*
1688 * According to RFC2460, PMTU is set to the IPv6 Minimum Link
1689 * MTU (1280) and a fragment header should always be included
1690 * after a node receives a Too Big message reporting a PMTU
1691 * less than the IPv6 Minimum Link MTU.
1692 */
1693 pmtu = IPV6_MIN_MTU;
1694 allfrag = 1;
1695 }
1696
1697 /* New MTU received -> path was valid.
1698 Too Big messages are sent only in response to data packets,
1699 so this nexthop apparently is reachable. --ANK
1700 */
1701 dst_confirm(&rt->dst);
1702
1703 /* Host route. If it is static, it would be better
1704 not to override it but to add a new one, so that
1705 when the cache entry expires the old pmtu
1706 is restored automatically.
1707 */
1708 if (rt->rt6i_flags & RTF_CACHE) {
1709 dst_metric_set(&rt->dst, RTAX_MTU, pmtu);
1710 if (allfrag) {
1711 u32 features = dst_metric(&rt->dst, RTAX_FEATURES);
1712 features |= RTAX_FEATURE_ALLFRAG;
1713 dst_metric_set(&rt->dst, RTAX_FEATURES, features);
1714 }
1715 dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1716 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1717 goto out;
1718 }
1719
1720 /* Network route.
1721 Two cases are possible:
1722 1. It is a connected route. Action: COW.
1723 2. It is a gatewayed or NONEXTHOP route. Action: clone it.
1724 */
1725 if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
1726 nrt = rt6_alloc_cow(rt, daddr, saddr);
1727 else
1728 nrt = rt6_alloc_clone(rt, daddr);
1729
1730 if (nrt) {
1731 dst_metric_set(&nrt->dst, RTAX_MTU, pmtu);
1732 if (allfrag) {
1733 u32 features = dst_metric(&nrt->dst, RTAX_FEATURES);
1734 features |= RTAX_FEATURE_ALLFRAG;
1735 dst_metric_set(&nrt->dst, RTAX_FEATURES, features);
1736 }
1737
1738 /* According to RFC 1981, a PMTU increase should not be detected
1739 * within 5 minutes; the recommended timer is 10 minutes. Here the
1740 * route expiration time is set to ip6_rt_mtu_expires, which defaults
1741 * to 10 minutes. After that the decreased pmtu expires and PMTU
1742 * increase detection happens automatically.
1743 */
1744 dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1745 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1746
1747 ip6_ins_rt(nrt);
1748 }
1749 out:
1750 dst_release(&rt->dst);
1751 }
1752
1753 void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr,
1754 struct net_device *dev, u32 pmtu)
1755 {
1756 struct net *net = dev_net(dev);
1757
1758 /*
1759 * RFC 1981 states that a node "MUST reduce the size of the packets it
1760 * is sending along the path" that caused the Packet Too Big message.
1761 * Since it's not possible in the general case to determine which
1762 * interface was used to send the original packet, we update the MTU
1763 * on the interface that will be used to send future packets. We also
1764 * update the MTU on the interface that received the Packet Too Big in
1765 * case the original packet was forced out that interface with
1766 * SO_BINDTODEVICE or similar. This is the next best thing to the
1767 * correct behaviour, which would be to update the MTU on all
1768 * interfaces.
1769 */
1770 rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
1771 rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
1772 }
1773
1774 /*
1775 * Misc support functions
1776 */
1777
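/*
 * Duplicate @ort as a host route to @dest: device, idev, metrics,
 * gateway, prefsrc and table references are copied, while the metric
 * is reset and RTF_EXPIRES is cleared.  Used as the starting point
 * for cached clones and redirect routes.
 */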
1778 static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
1779 const struct in6_addr *dest)
1780 {
1781 struct net *net = dev_net(ort->dst.dev);
1782 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
1783 ort->dst.dev, 0);
1784
1785 if (rt) {
1786 rt->dst.input = ort->dst.input;
1787 rt->dst.output = ort->dst.output;
1788 rt->dst.flags |= DST_HOST;
1789
1790 rt->rt6i_dst.addr = *dest;
1791 rt->rt6i_dst.plen = 128;
1792 dst_copy_metrics(&rt->dst, &ort->dst);
1793 rt->dst.error = ort->dst.error;
1794 rt->rt6i_idev = ort->rt6i_idev;
1795 if (rt->rt6i_idev)
1796 in6_dev_hold(rt->rt6i_idev);
1797 rt->dst.lastuse = jiffies;
1798 rt->dst.expires = 0;
1799
1800 rt->rt6i_gateway = ort->rt6i_gateway;
1801 rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
1802 rt->rt6i_metric = 0;
1803
1804 #ifdef CONFIG_IPV6_SUBTREES
1805 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
1806 #endif
1807 memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
1808 rt->rt6i_table = ort->rt6i_table;
1809 }
1810 return rt;
1811 }
1812
1813 #ifdef CONFIG_IPV6_ROUTE_INFO
1814 static struct rt6_info *rt6_get_route_info(struct net *net,
1815 const struct in6_addr *prefix, int prefixlen,
1816 const struct in6_addr *gwaddr, int ifindex)
1817 {
1818 struct fib6_node *fn;
1819 struct rt6_info *rt = NULL;
1820 struct fib6_table *table;
1821
1822 table = fib6_get_table(net, RT6_TABLE_INFO);
1823 if (!table)
1824 return NULL;
1825
1826 write_lock_bh(&table->tb6_lock);
1827 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
1828 if (!fn)
1829 goto out;
1830
1831 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1832 if (rt->dst.dev->ifindex != ifindex)
1833 continue;
1834 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1835 continue;
1836 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1837 continue;
1838 dst_hold(&rt->dst);
1839 break;
1840 }
1841 out:
1842 write_unlock_bh(&table->tb6_lock);
1843 return rt;
1844 }
1845
1846 static struct rt6_info *rt6_add_route_info(struct net *net,
1847 const struct in6_addr *prefix, int prefixlen,
1848 const struct in6_addr *gwaddr, int ifindex,
1849 unsigned pref)
1850 {
1851 struct fib6_config cfg = {
1852 .fc_table = RT6_TABLE_INFO,
1853 .fc_metric = IP6_RT_PRIO_USER,
1854 .fc_ifindex = ifindex,
1855 .fc_dst_len = prefixlen,
1856 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
1857 RTF_UP | RTF_PREF(pref),
1858 .fc_nlinfo.pid = 0,
1859 .fc_nlinfo.nlh = NULL,
1860 .fc_nlinfo.nl_net = net,
1861 };
1862
1863 cfg.fc_dst = *prefix;
1864 cfg.fc_gateway = *gwaddr;
1865
1866 /* We should treat it as a default route if prefix length is 0. */
1867 if (!prefixlen)
1868 cfg.fc_flags |= RTF_DEFAULT;
1869
1870 ip6_route_add(&cfg);
1871
1872 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
1873 }
1874 #endif
1875
1876 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
1877 {
1878 struct rt6_info *rt;
1879 struct fib6_table *table;
1880
1881 table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
1882 if (!table)
1883 return NULL;
1884
1885 write_lock_bh(&table->tb6_lock);
1886 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
1887 if (dev == rt->dst.dev &&
1888 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1889 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1890 break;
1891 }
1892 if (rt)
1893 dst_hold(&rt->dst);
1894 write_unlock_bh(&table->tb6_lock);
1895 return rt;
1896 }
1897
1898 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
1899 struct net_device *dev,
1900 unsigned int pref)
1901 {
1902 struct fib6_config cfg = {
1903 .fc_table = RT6_TABLE_DFLT,
1904 .fc_metric = IP6_RT_PRIO_USER,
1905 .fc_ifindex = dev->ifindex,
1906 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
1907 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
1908 .fc_nlinfo.pid = 0,
1909 .fc_nlinfo.nlh = NULL,
1910 .fc_nlinfo.nl_net = dev_net(dev),
1911 };
1912
1913 cfg.fc_gateway = *gwaddr;
1914
1915 ip6_route_add(&cfg);
1916
1917 return rt6_get_dflt_router(gwaddr, dev);
1918 }
1919
1920 void rt6_purge_dflt_routers(struct net *net)
1921 {
1922 struct rt6_info *rt;
1923 struct fib6_table *table;
1924
1925 /* NOTE: Keep consistent with rt6_get_dflt_router */
1926 table = fib6_get_table(net, RT6_TABLE_DFLT);
1927 if (!table)
1928 return;
1929
1930 restart:
1931 read_lock_bh(&table->tb6_lock);
1932 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
1933 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
1934 dst_hold(&rt->dst);
1935 read_unlock_bh(&table->tb6_lock);
1936 ip6_del_rt(rt);
1937 goto restart;
1938 }
1939 }
1940 read_unlock_bh(&table->tb6_lock);
1941 }
1942
1943 static void rtmsg_to_fib6_config(struct net *net,
1944 struct in6_rtmsg *rtmsg,
1945 struct fib6_config *cfg)
1946 {
1947 memset(cfg, 0, sizeof(*cfg));
1948
1949 cfg->fc_table = RT6_TABLE_MAIN;
1950 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
1951 cfg->fc_metric = rtmsg->rtmsg_metric;
1952 cfg->fc_expires = rtmsg->rtmsg_info;
1953 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
1954 cfg->fc_src_len = rtmsg->rtmsg_src_len;
1955 cfg->fc_flags = rtmsg->rtmsg_flags;
1956
1957 cfg->fc_nlinfo.nl_net = net;
1958
1959 cfg->fc_dst = rtmsg->rtmsg_dst;
1960 cfg->fc_src = rtmsg->rtmsg_src;
1961 cfg->fc_gateway = rtmsg->rtmsg_gateway;
1962 }
1963
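/*
 * Legacy SIOCADDRT/SIOCDELRT handler: copy the in6_rtmsg from user
 * space, convert it with rtmsg_to_fib6_config() and add or delete
 * the route under the RTNL lock.  Requires CAP_NET_ADMIN.
 */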
1964 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1965 {
1966 struct fib6_config cfg;
1967 struct in6_rtmsg rtmsg;
1968 int err;
1969
1970 switch (cmd) {
1971 case SIOCADDRT: /* Add a route */
1972 case SIOCDELRT: /* Delete a route */
1973 if (!capable(CAP_NET_ADMIN))
1974 return -EPERM;
1975 err = copy_from_user(&rtmsg, arg,
1976 sizeof(struct in6_rtmsg));
1977 if (err)
1978 return -EFAULT;
1979
1980 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
1981
1982 rtnl_lock();
1983 switch (cmd) {
1984 case SIOCADDRT:
1985 err = ip6_route_add(&cfg);
1986 break;
1987 case SIOCDELRT:
1988 err = ip6_route_del(&cfg);
1989 break;
1990 default:
1991 err = -EINVAL;
1992 }
1993 rtnl_unlock();
1994
1995 return err;
1996 }
1997
1998 return -EINVAL;
1999 }
2000
2001 /*
2002 * Drop the packet on the floor
2003 */
2004
2005 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
2006 {
2007 int type;
2008 struct dst_entry *dst = skb_dst(skb);
2009 switch (ipstats_mib_noroutes) {
2010 case IPSTATS_MIB_INNOROUTES:
2011 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
2012 if (type == IPV6_ADDR_ANY) {
2013 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2014 IPSTATS_MIB_INADDRERRORS);
2015 break;
2016 }
2017 /* FALLTHROUGH */
2018 case IPSTATS_MIB_OUTNOROUTES:
2019 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
2020 ipstats_mib_noroutes);
2021 break;
2022 }
2023 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
2024 kfree_skb(skb);
2025 return 0;
2026 }
2027
2028 static int ip6_pkt_discard(struct sk_buff *skb)
2029 {
2030 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
2031 }
2032
2033 static int ip6_pkt_discard_out(struct sk_buff *skb)
2034 {
2035 skb->dev = skb_dst(skb)->dev;
2036 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
2037 }
2038
2039 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2040
2041 static int ip6_pkt_prohibit(struct sk_buff *skb)
2042 {
2043 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
2044 }
2045
2046 static int ip6_pkt_prohibit_out(struct sk_buff *skb)
2047 {
2048 skb->dev = skb_dst(skb)->dev;
2049 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
2050 }
2051
2052 #endif
2053
2054 /*
2055 * Allocate a dst for local (unicast / anycast) address.
2056 */
2057
2058 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2059 const struct in6_addr *addr,
2060 bool anycast)
2061 {
2062 struct net *net = dev_net(idev->dev);
2063 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
2064 net->loopback_dev, 0);
2065 int err;
2066
2067 if (!rt) {
2068 if (net_ratelimit())
2069 pr_warning("IPv6: Maximum number of routes reached,"
2070 " consider increasing route/max_size.\n");
2071 return ERR_PTR(-ENOMEM);
2072 }
2073
2074 in6_dev_hold(idev);
2075
2076 rt->dst.flags |= DST_HOST;
2077 rt->dst.input = ip6_input;
2078 rt->dst.output = ip6_output;
2079 rt->rt6i_idev = idev;
2080 rt->dst.obsolete = -1;
2081
2082 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
2083 if (anycast)
2084 rt->rt6i_flags |= RTF_ANYCAST;
2085 else
2086 rt->rt6i_flags |= RTF_LOCAL;
2087 err = rt6_bind_neighbour(rt);
2088 if (err) {
2089 dst_free(&rt->dst);
2090 return ERR_PTR(err);
2091 }
2092
2093 rt->rt6i_dst.addr = *addr;
2094 rt->rt6i_dst.plen = 128;
2095 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
2096
2097 atomic_set(&rt->dst.__refcnt, 1);
2098
2099 return rt;
2100 }
2101
2102 int ip6_route_get_saddr(struct net *net,
2103 struct rt6_info *rt,
2104 const struct in6_addr *daddr,
2105 unsigned int prefs,
2106 struct in6_addr *saddr)
2107 {
2108 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
2109 int err = 0;
2110 if (rt->rt6i_prefsrc.plen)
2111 *saddr = rt->rt6i_prefsrc.addr;
2112 else
2113 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2114 daddr, prefs, saddr);
2115 return err;
2116 }
2117
2118 /* remove deleted ip from prefsrc entries */
2119 struct arg_dev_net_ip {
2120 struct net_device *dev;
2121 struct net *net;
2122 struct in6_addr *addr;
2123 };
2124
2125 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2126 {
2127 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2128 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2129 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2130
2131 if (((void *)rt->dst.dev == dev || !dev) &&
2132 rt != net->ipv6.ip6_null_entry &&
2133 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2134 /* remove prefsrc entry */
2135 rt->rt6i_prefsrc.plen = 0;
2136 }
2137 return 0;
2138 }
2139
2140 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2141 {
2142 struct net *net = dev_net(ifp->idev->dev);
2143 struct arg_dev_net_ip adni = {
2144 .dev = ifp->idev->dev,
2145 .net = net,
2146 .addr = &ifp->addr,
2147 };
2148 fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
2149 }
2150
2151 struct arg_dev_net {
2152 struct net_device *dev;
2153 struct net *net;
2154 };
2155
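/*
 * fib6_clean_all() callback used by rt6_ifdown(): returning -1 asks the
 * walker to delete the route.  Every route through the dying device (or
 * every route when dev == NULL) is removed, except the null entry.
 */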
2156 static int fib6_ifdown(struct rt6_info *rt, void *arg)
2157 {
2158 const struct arg_dev_net *adn = arg;
2159 const struct net_device *dev = adn->dev;
2160
2161 if ((rt->dst.dev == dev || !dev) &&
2162 rt != adn->net->ipv6.ip6_null_entry)
2163 return -1;
2164
2165 return 0;
2166 }
2167
2168 void rt6_ifdown(struct net *net, struct net_device *dev)
2169 {
2170 struct arg_dev_net adn = {
2171 .dev = dev,
2172 .net = net,
2173 };
2174
2175 fib6_clean_all(net, fib6_ifdown, 0, &adn);
2176 icmp6_clean_all(fib6_ifdown, &adn);
2177 }
2178
2179 struct rt6_mtu_change_arg
2180 {
2181 struct net_device *dev;
2182 unsigned mtu;
2183 };
2184
2185 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2186 {
2187 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
2188 struct inet6_dev *idev;
2189
2190 /* In IPv6, PMTU discovery is not optional,
2191 so the RTAX_MTU lock cannot disable it.
2192 We still use this lock to block changes
2193 caused by addrconf/ndisc.
2194 */
2195
2196 idev = __in6_dev_get(arg->dev);
2197 if (!idev)
2198 return 0;
2199
2200 /* For an administrative MTU increase, there is no way to discover
2201 an IPv6 PMTU increase, so the route PMTU must be updated here.
2202 Since RFC 1981 does not cover administrative MTU increases,
2203 updating the PMTU on increase is a MUST (e.g. for jumbo frames).
2204 */
2205 /*
2206 If the new MTU is less than the route PMTU, the new MTU becomes the
2207 lowest MTU in the path; update the route PMTU to reflect the
2208 decrease. If the new MTU is greater than the route PMTU and the
2209 old MTU was the lowest MTU in the path, update the route PMTU
2210 to reflect the increase. If another node on the path still has
2211 the lowest MTU, its Packet Too Big message will trigger PMTU
2212 discovery again.
2213 */
2214 if (rt->dst.dev == arg->dev &&
2215 !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2216 (dst_mtu(&rt->dst) >= arg->mtu ||
2217 (dst_mtu(&rt->dst) < arg->mtu &&
2218 dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2219 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2220 }
2221 return 0;
2222 }
2223
2224 void rt6_mtu_change(struct net_device *dev, unsigned mtu)
2225 {
2226 struct rt6_mtu_change_arg arg = {
2227 .dev = dev,
2228 .mtu = mtu,
2229 };
2230
2231 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, 0, &arg);
2232 }
2233
2234 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
2235 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
2236 [RTA_OIF] = { .type = NLA_U32 },
2237 [RTA_IIF] = { .type = NLA_U32 },
2238 [RTA_PRIORITY] = { .type = NLA_U32 },
2239 [RTA_METRICS] = { .type = NLA_NESTED },
2240 };
2241
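/*
 * Translate an RTM_NEWROUTE/RTM_DELROUTE netlink request into a
 * fib6_config that ip6_route_add()/ip6_route_del() understand.
 * A request without RTA_TABLE falls back to rtm->rtm_table.
 */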
2242 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2243 struct fib6_config *cfg)
2244 {
2245 struct rtmsg *rtm;
2246 struct nlattr *tb[RTA_MAX+1];
2247 int err;
2248
2249 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2250 if (err < 0)
2251 goto errout;
2252
2253 err = -EINVAL;
2254 rtm = nlmsg_data(nlh);
2255 memset(cfg, 0, sizeof(*cfg));
2256
2257 cfg->fc_table = rtm->rtm_table;
2258 cfg->fc_dst_len = rtm->rtm_dst_len;
2259 cfg->fc_src_len = rtm->rtm_src_len;
2260 cfg->fc_flags = RTF_UP;
2261 cfg->fc_protocol = rtm->rtm_protocol;
2262
2263 if (rtm->rtm_type == RTN_UNREACHABLE)
2264 cfg->fc_flags |= RTF_REJECT;
2265
2266 if (rtm->rtm_type == RTN_LOCAL)
2267 cfg->fc_flags |= RTF_LOCAL;
2268
2269 cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
2270 cfg->fc_nlinfo.nlh = nlh;
2271 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
2272
2273 if (tb[RTA_GATEWAY]) {
2274 nla_memcpy(&cfg->fc_gateway, tb[RTA_GATEWAY], 16);
2275 cfg->fc_flags |= RTF_GATEWAY;
2276 }
2277
2278 if (tb[RTA_DST]) {
2279 int plen = (rtm->rtm_dst_len + 7) >> 3;
2280
2281 if (nla_len(tb[RTA_DST]) < plen)
2282 goto errout;
2283
2284 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
2285 }
2286
2287 if (tb[RTA_SRC]) {
2288 int plen = (rtm->rtm_src_len + 7) >> 3;
2289
2290 if (nla_len(tb[RTA_SRC]) < plen)
2291 goto errout;
2292
2293 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2294 }
2295
2296 if (tb[RTA_PREFSRC])
2297 nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
2298
2299 if (tb[RTA_OIF])
2300 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2301
2302 if (tb[RTA_PRIORITY])
2303 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
2304
2305 if (tb[RTA_METRICS]) {
2306 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
2307 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
2308 }
2309
2310 if (tb[RTA_TABLE])
2311 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
2312
2313 err = 0;
2314 errout:
2315 return err;
2316 }
2317
2318 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2319 {
2320 struct fib6_config cfg;
2321 int err;
2322
2323 err = rtm_to_fib6_config(skb, nlh, &cfg);
2324 if (err < 0)
2325 return err;
2326
2327 return ip6_route_del(&cfg);
2328 }
2329
2330 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
2331 {
2332 struct fib6_config cfg;
2333 int err;
2334
2335 err = rtm_to_fib6_config(skb, nlh, &cfg);
2336 if (err < 0)
2337 return err;
2338
2339 return ip6_route_add(&cfg);
2340 }
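/*
 * For reference: an iproute2 command such as
 *     ip -6 route add 2001:db8::/64 via fe80::1 dev eth0
 * arrives here as an RTM_NEWROUTE message and reaches ip6_route_add()
 * via rtm_to_fib6_config() above.  (2001:db8::/64 is simply the
 * documentation prefix, used here for illustration.)
 */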
2341
2342 static inline size_t rt6_nlmsg_size(void)
2343 {
2344 return NLMSG_ALIGN(sizeof(struct rtmsg))
2345 + nla_total_size(16) /* RTA_SRC */
2346 + nla_total_size(16) /* RTA_DST */
2347 + nla_total_size(16) /* RTA_GATEWAY */
2348 + nla_total_size(16) /* RTA_PREFSRC */
2349 + nla_total_size(4) /* RTA_TABLE */
2350 + nla_total_size(4) /* RTA_IIF */
2351 + nla_total_size(4) /* RTA_OIF */
2352 + nla_total_size(4) /* RTA_PRIORITY */
2353 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
2354 + nla_total_size(sizeof(struct rta_cacheinfo));
2355 }
2356
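/*
 * Build a single RTM_NEWROUTE (or RTM_DELROUTE) message describing @rt.
 * When @prefix is set, only RTF_PREFIX_RT routes are reported; other
 * routes return 1 so that a dump silently skips them.
 */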
2357 static int rt6_fill_node(struct net *net,
2358 struct sk_buff *skb, struct rt6_info *rt,
2359 struct in6_addr *dst, struct in6_addr *src,
2360 int iif, int type, u32 pid, u32 seq,
2361 int prefix, int nowait, unsigned int flags)
2362 {
2363 struct rtmsg *rtm;
2364 struct nlmsghdr *nlh;
2365 long expires;
2366 u32 table;
2367 struct neighbour *n;
2368
2369 if (prefix) { /* user wants prefix routes only */
2370 if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
2371 /* success since this is not a prefix route */
2372 return 1;
2373 }
2374 }
2375
2376 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
2377 if (!nlh)
2378 return -EMSGSIZE;
2379
2380 rtm = nlmsg_data(nlh);
2381 rtm->rtm_family = AF_INET6;
2382 rtm->rtm_dst_len = rt->rt6i_dst.plen;
2383 rtm->rtm_src_len = rt->rt6i_src.plen;
2384 rtm->rtm_tos = 0;
2385 if (rt->rt6i_table)
2386 table = rt->rt6i_table->tb6_id;
2387 else
2388 table = RT6_TABLE_UNSPEC;
2389 rtm->rtm_table = table;
2390 NLA_PUT_U32(skb, RTA_TABLE, table);
2391 if (rt->rt6i_flags & RTF_REJECT)
2392 rtm->rtm_type = RTN_UNREACHABLE;
2393 else if (rt->rt6i_flags & RTF_LOCAL)
2394 rtm->rtm_type = RTN_LOCAL;
2395 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
2396 rtm->rtm_type = RTN_LOCAL;
2397 else
2398 rtm->rtm_type = RTN_UNICAST;
2399 rtm->rtm_flags = 0;
2400 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2401 rtm->rtm_protocol = rt->rt6i_protocol;
2402 if (rt->rt6i_flags & RTF_DYNAMIC)
2403 rtm->rtm_protocol = RTPROT_REDIRECT;
2404 else if (rt->rt6i_flags & RTF_ADDRCONF)
2405 rtm->rtm_protocol = RTPROT_KERNEL;
2406 else if (rt->rt6i_flags & RTF_DEFAULT)
2407 rtm->rtm_protocol = RTPROT_RA;
2408
2409 if (rt->rt6i_flags & RTF_CACHE)
2410 rtm->rtm_flags |= RTM_F_CLONED;
2411
2412 if (dst) {
2413 NLA_PUT(skb, RTA_DST, 16, dst);
2414 rtm->rtm_dst_len = 128;
2415 } else if (rtm->rtm_dst_len)
2416 NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
2417 #ifdef CONFIG_IPV6_SUBTREES
2418 if (src) {
2419 NLA_PUT(skb, RTA_SRC, 16, src);
2420 rtm->rtm_src_len = 128;
2421 } else if (rtm->rtm_src_len)
2422 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
2423 #endif
2424 if (iif) {
2425 #ifdef CONFIG_IPV6_MROUTE
2426 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
2427 int err = ip6mr_get_route(net, skb, rtm, nowait);
2428 if (err <= 0) {
2429 if (!nowait) {
2430 if (err == 0)
2431 return 0;
2432 goto nla_put_failure;
2433 } else {
2434 if (err == -EMSGSIZE)
2435 goto nla_put_failure;
2436 }
2437 }
2438 } else
2439 #endif
2440 NLA_PUT_U32(skb, RTA_IIF, iif);
2441 } else if (dst) {
2442 struct in6_addr saddr_buf;
2443 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0)
2444 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2445 }
2446
2447 if (rt->rt6i_prefsrc.plen) {
2448 struct in6_addr saddr_buf;
2449 saddr_buf = rt->rt6i_prefsrc.addr;
2450 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2451 }
2452
2453 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2454 goto nla_put_failure;
2455
2456 rcu_read_lock();
2457 n = dst_get_neighbour_noref(&rt->dst);
2458 if (n)
2459 NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key);
2460 rcu_read_unlock();
2461
2462 if (rt->dst.dev)
2463 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
2464
2465 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
2466
2467 if (!(rt->rt6i_flags & RTF_EXPIRES))
2468 expires = 0;
2469 else if (rt->dst.expires - jiffies < INT_MAX)
2470 expires = rt->dst.expires - jiffies;
2471 else
2472 expires = INT_MAX;
2473
2474 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0,
2475 expires, rt->dst.error) < 0)
2476 goto nla_put_failure;
2477
2478 return nlmsg_end(skb, nlh);
2479
2480 nla_put_failure:
2481 nlmsg_cancel(skb, nlh);
2482 return -EMSGSIZE;
2483 }
2484
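/*
 * fib6 dump callback: emit one NLM_F_MULTI route message per entry.
 * RTM_F_PREFIX in the request restricts the dump to prefix routes.
 */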
2485 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2486 {
2487 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
2488 int prefix;
2489
2490 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
2491 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
2492 prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
2493 } else
2494 prefix = 0;
2495
2496 return rt6_fill_node(arg->net,
2497 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
2498 NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
2499 prefix, 0, NLM_F_MULTI);
2500 }
2501
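/*
 * RTM_GETROUTE handler: perform a route lookup for the requested
 * source/destination/interface and answer with a single RTM_NEWROUTE
 * message, much like "ip -6 route get <addr>" expects.
 */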
2502 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
2503 {
2504 struct net *net = sock_net(in_skb->sk);
2505 struct nlattr *tb[RTA_MAX+1];
2506 struct rt6_info *rt;
2507 struct sk_buff *skb;
2508 struct rtmsg *rtm;
2509 struct flowi6 fl6;
2510 int err, iif = 0;
2511
2512 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
2513 if (err < 0)
2514 goto errout;
2515
2516 err = -EINVAL;
2517 memset(&fl6, 0, sizeof(fl6));
2518
2519 if (tb[RTA_SRC]) {
2520 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
2521 goto errout;
2522
2523 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
2524 }
2525
2526 if (tb[RTA_DST]) {
2527 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
2528 goto errout;
2529
2530 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
2531 }
2532
2533 if (tb[RTA_IIF])
2534 iif = nla_get_u32(tb[RTA_IIF]);
2535
2536 if (tb[RTA_OIF])
2537 fl6.flowi6_oif = nla_get_u32(tb[RTA_OIF]);
2538
2539 if (iif) {
2540 struct net_device *dev;
2541 dev = __dev_get_by_index(net, iif);
2542 if (!dev) {
2543 err = -ENODEV;
2544 goto errout;
2545 }
2546 }
2547
2548 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2549 if (!skb) {
2550 err = -ENOBUFS;
2551 goto errout;
2552 }
2553
2554 /* Reserve room for dummy headers; this skb can pass
2555 through a good chunk of the routing engine.
2556 */
2557 skb_reset_mac_header(skb);
2558 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2559
2560 rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl6);
2561 skb_dst_set(skb, &rt->dst);
2562
2563 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
2564 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
2565 nlh->nlmsg_seq, 0, 0, 0);
2566 if (err < 0) {
2567 kfree_skb(skb);
2568 goto errout;
2569 }
2570
2571 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
2572 errout:
2573 return err;
2574 }
2575
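/*
 * Broadcast a route change to RTNLGRP_IPV6_ROUTE listeners.  The skb is
 * sized with rt6_nlmsg_size(), so an -EMSGSIZE from rt6_fill_node()
 * would indicate a bug in that estimate.
 */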
2576 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
2577 {
2578 struct sk_buff *skb;
2579 struct net *net = info->nl_net;
2580 u32 seq;
2581 int err;
2582
2583 err = -ENOBUFS;
2584 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
2585
2586 skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
2587 if (!skb)
2588 goto errout;
2589
2590 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
2591 event, info->pid, seq, 0, 0, 0);
2592 if (err < 0) {
2593 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
2594 WARN_ON(err == -EMSGSIZE);
2595 kfree_skb(skb);
2596 goto errout;
2597 }
2598 rtnl_notify(skb, net, info->pid, RTNLGRP_IPV6_ROUTE,
2599 info->nlh, gfp_any());
2600 return;
2601 errout:
2602 if (err < 0)
2603 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
2604 }
2605
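/*
 * Netdevice notifier: once the per-namespace loopback device registers,
 * attach it (and its inet6_dev) to the special null/prohibit/blackhole
 * route entries created in ip6_route_net_init().
 */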
2606 static int ip6_route_dev_notify(struct notifier_block *this,
2607 unsigned long event, void *data)
2608 {
2609 struct net_device *dev = (struct net_device *)data;
2610 struct net *net = dev_net(dev);
2611
2612 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2613 net->ipv6.ip6_null_entry->dst.dev = dev;
2614 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2615 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2616 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
2617 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2618 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
2619 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2620 #endif
2621 }
2622
2623 return NOTIFY_OK;
2624 }
2625
2626 /*
2627 * /proc
2628 */
2629
2630 #ifdef CONFIG_PROC_FS
2631
2632 struct rt6_proc_arg
2633 {
2634 char *buffer;
2635 int offset;
2636 int length;
2637 int skip;
2638 int len;
2639 };
2640
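/*
 * Emit one line of /proc/net/ipv6_route: destination/plen, source/plen
 * (all zeroes without CONFIG_IPV6_SUBTREES), next hop, metric, refcount,
 * use count, flags and device name.
 */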
2641 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2642 {
2643 struct seq_file *m = p_arg;
2644 struct neighbour *n;
2645
2646 seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
2647
2648 #ifdef CONFIG_IPV6_SUBTREES
2649 seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
2650 #else
2651 seq_puts(m, "00000000000000000000000000000000 00 ");
2652 #endif
2653 rcu_read_lock();
2654 n = dst_get_neighbour_noref(&rt->dst);
2655 if (n) {
2656 seq_printf(m, "%pi6", n->primary_key);
2657 } else {
2658 seq_puts(m, "00000000000000000000000000000000");
2659 }
2660 rcu_read_unlock();
2661 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2662 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
2663 rt->dst.__use, rt->rt6i_flags,
2664 rt->dst.dev ? rt->dst.dev->name : "");
2665 return 0;
2666 }
2667
2668 static int ipv6_route_show(struct seq_file *m, void *v)
2669 {
2670 struct net *net = (struct net *)m->private;
2671 fib6_clean_all(net, rt6_info_route, 0, m);
2672 return 0;
2673 }
2674
2675 static int ipv6_route_open(struct inode *inode, struct file *file)
2676 {
2677 return single_open_net(inode, file, ipv6_route_show);
2678 }
2679
2680 static const struct file_operations ipv6_route_proc_fops = {
2681 .owner = THIS_MODULE,
2682 .open = ipv6_route_open,
2683 .read = seq_read,
2684 .llseek = seq_lseek,
2685 .release = single_release_net,
2686 };
2687
2688 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
2689 {
2690 struct net *net = (struct net *)seq->private;
2691 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
2692 net->ipv6.rt6_stats->fib_nodes,
2693 net->ipv6.rt6_stats->fib_route_nodes,
2694 net->ipv6.rt6_stats->fib_rt_alloc,
2695 net->ipv6.rt6_stats->fib_rt_entries,
2696 net->ipv6.rt6_stats->fib_rt_cache,
2697 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
2698 net->ipv6.rt6_stats->fib_discarded_routes);
2699
2700 return 0;
2701 }
2702
2703 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
2704 {
2705 return single_open_net(inode, file, rt6_stats_seq_show);
2706 }
2707
2708 static const struct file_operations rt6_stats_seq_fops = {
2709 .owner = THIS_MODULE,
2710 .open = rt6_stats_seq_open,
2711 .read = seq_read,
2712 .llseek = seq_lseek,
2713 .release = single_release_net,
2714 };
2715 #endif /* CONFIG_PROC_FS */
2716
2717 #ifdef CONFIG_SYSCTL
2718
2719 static
2720 int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
2721 void __user *buffer, size_t *lenp, loff_t *ppos)
2722 {
2723 struct net *net;
2724 int delay;
2725 if (!write)
2726 return -EINVAL;
2727
2728 net = (struct net *)ctl->extra1;
2729 delay = net->ipv6.sysctl.flush_delay;
2730 proc_dointvec(ctl, write, buffer, lenp, ppos);
2731 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2732 return 0;
2733 }
2734
2735 ctl_table ipv6_route_table_template[] = {
2736 {
2737 .procname = "flush",
2738 .data = &init_net.ipv6.sysctl.flush_delay,
2739 .maxlen = sizeof(int),
2740 .mode = 0200,
2741 .proc_handler = ipv6_sysctl_rtcache_flush
2742 },
2743 {
2744 .procname = "gc_thresh",
2745 .data = &ip6_dst_ops_template.gc_thresh,
2746 .maxlen = sizeof(int),
2747 .mode = 0644,
2748 .proc_handler = proc_dointvec,
2749 },
2750 {
2751 .procname = "max_size",
2752 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
2753 .maxlen = sizeof(int),
2754 .mode = 0644,
2755 .proc_handler = proc_dointvec,
2756 },
2757 {
2758 .procname = "gc_min_interval",
2759 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2760 .maxlen = sizeof(int),
2761 .mode = 0644,
2762 .proc_handler = proc_dointvec_jiffies,
2763 },
2764 {
2765 .procname = "gc_timeout",
2766 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
2767 .maxlen = sizeof(int),
2768 .mode = 0644,
2769 .proc_handler = proc_dointvec_jiffies,
2770 },
2771 {
2772 .procname = "gc_interval",
2773 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
2774 .maxlen = sizeof(int),
2775 .mode = 0644,
2776 .proc_handler = proc_dointvec_jiffies,
2777 },
2778 {
2779 .procname = "gc_elasticity",
2780 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
2781 .maxlen = sizeof(int),
2782 .mode = 0644,
2783 .proc_handler = proc_dointvec,
2784 },
2785 {
2786 .procname = "mtu_expires",
2787 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
2788 .maxlen = sizeof(int),
2789 .mode = 0644,
2790 .proc_handler = proc_dointvec_jiffies,
2791 },
2792 {
2793 .procname = "min_adv_mss",
2794 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
2795 .maxlen = sizeof(int),
2796 .mode = 0644,
2797 .proc_handler = proc_dointvec,
2798 },
2799 {
2800 .procname = "gc_min_interval_ms",
2801 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
2802 .maxlen = sizeof(int),
2803 .mode = 0644,
2804 .proc_handler = proc_dointvec_ms_jiffies,
2805 },
2806 { }
2807 };
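/*
 * Example (assuming the usual registration of this table under
 * net.ipv6.route): writing
 *     echo 1 > /proc/sys/net/ipv6/route/flush
 * invokes ipv6_sysctl_rtcache_flush() above and forces an immediate
 * fib6 garbage-collection pass.
 */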
2808
2809 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2810 {
2811 struct ctl_table *table;
2812
2813 table = kmemdup(ipv6_route_table_template,
2814 sizeof(ipv6_route_table_template),
2815 GFP_KERNEL);
2816
2817 if (table) {
2818 table[0].data = &net->ipv6.sysctl.flush_delay;
2819 table[0].extra1 = net;
2820 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2821 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2822 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2823 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
2824 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
2825 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
2826 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
2827 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
2828 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
2829 }
2830
2831 return table;
2832 }
2833 #endif
2834
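/*
 * Per-namespace setup: clone the dst_ops template, duplicate the null
 * (and, with CONFIG_IPV6_MULTIPLE_TABLES, prohibit/blackhole) template
 * routes, seed the sysctl defaults and create the /proc entries.
 * Errors unwind in reverse order below.
 */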
2835 static int __net_init ip6_route_net_init(struct net *net)
2836 {
2837 int ret = -ENOMEM;
2838
2839 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
2840 sizeof(net->ipv6.ip6_dst_ops));
2841
2842 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
2843 goto out_ip6_dst_ops;
2844
2845 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
2846 sizeof(*net->ipv6.ip6_null_entry),
2847 GFP_KERNEL);
2848 if (!net->ipv6.ip6_null_entry)
2849 goto out_ip6_dst_entries;
2850 net->ipv6.ip6_null_entry->dst.path =
2851 (struct dst_entry *)net->ipv6.ip6_null_entry;
2852 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2853 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
2854 ip6_template_metrics, true);
2855
2856 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2857 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
2858 sizeof(*net->ipv6.ip6_prohibit_entry),
2859 GFP_KERNEL);
2860 if (!net->ipv6.ip6_prohibit_entry)
2861 goto out_ip6_null_entry;
2862 net->ipv6.ip6_prohibit_entry->dst.path =
2863 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2864 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2865 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
2866 ip6_template_metrics, true);
2867
2868 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2869 sizeof(*net->ipv6.ip6_blk_hole_entry),
2870 GFP_KERNEL);
2871 if (!net->ipv6.ip6_blk_hole_entry)
2872 goto out_ip6_prohibit_entry;
2873 net->ipv6.ip6_blk_hole_entry->dst.path =
2874 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2875 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2876 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
2877 ip6_template_metrics, true);
2878 #endif
2879
2880 net->ipv6.sysctl.flush_delay = 0;
2881 net->ipv6.sysctl.ip6_rt_max_size = 4096;
2882 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
2883 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
2884 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
2885 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
2886 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
2887 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
2888
2889 #ifdef CONFIG_PROC_FS
2890 proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
2891 proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
2892 #endif
2893 net->ipv6.ip6_rt_gc_expire = 30*HZ;
2894
2895 ret = 0;
2896 out:
2897 return ret;
2898
2899 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2900 out_ip6_prohibit_entry:
2901 kfree(net->ipv6.ip6_prohibit_entry);
2902 out_ip6_null_entry:
2903 kfree(net->ipv6.ip6_null_entry);
2904 #endif
2905 out_ip6_dst_entries:
2906 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
2907 out_ip6_dst_ops:
2908 goto out;
2909 }
2910
2911 static void __net_exit ip6_route_net_exit(struct net *net)
2912 {
2913 #ifdef CONFIG_PROC_FS
2914 proc_net_remove(net, "ipv6_route");
2915 proc_net_remove(net, "rt6_stats");
2916 #endif
2917 kfree(net->ipv6.ip6_null_entry);
2918 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2919 kfree(net->ipv6.ip6_prohibit_entry);
2920 kfree(net->ipv6.ip6_blk_hole_entry);
2921 #endif
2922 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
2923 }
2924
2925 static struct pernet_operations ip6_route_net_ops = {
2926 .init = ip6_route_net_init,
2927 .exit = ip6_route_net_exit,
2928 };
2929
2930 static struct notifier_block ip6_route_dev_notifier = {
2931 .notifier_call = ip6_route_dev_notify,
2932 .priority = 0,
2933 };
2934
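/*
 * Subsystem init: create the rt6_info slab cache, register the pernet
 * ops, wire the template routes to init_net's loopback device, then
 * bring up fib6, xfrm6, policy rules, the rtnetlink handlers and the
 * netdevice notifier.
 */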
2935 int __init ip6_route_init(void)
2936 {
2937 int ret;
2938
2939 ret = -ENOMEM;
2940 ip6_dst_ops_template.kmem_cachep =
2941 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
2942 SLAB_HWCACHE_ALIGN, NULL);
2943 if (!ip6_dst_ops_template.kmem_cachep)
2944 goto out;
2945
2946 ret = dst_entries_init(&ip6_dst_blackhole_ops);
2947 if (ret)
2948 goto out_kmem_cache;
2949
2950 ret = register_pernet_subsys(&ip6_route_net_ops);
2951 if (ret)
2952 goto out_dst_entries;
2953
2954 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
2955
2956 /* The loopback device is registered before this code runs, so the
2957 * loopback reference in rt6_info is not taken there; take it
2958 * manually for init_net. */
2959 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
2960 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2961 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2962 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
2963 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2964 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
2965 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2966 #endif
2967 ret = fib6_init();
2968 if (ret)
2969 goto out_register_subsys;
2970
2971 ret = xfrm6_init();
2972 if (ret)
2973 goto out_fib6_init;
2974
2975 ret = fib6_rules_init();
2976 if (ret)
2977 goto xfrm6_init;
2978
2979 ret = -ENOBUFS;
2980 if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
2981 __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
2982 __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
2983 goto fib6_rules_init;
2984
2985 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
2986 if (ret)
2987 goto fib6_rules_init;
2988
2989 out:
2990 return ret;
2991
2992 fib6_rules_init:
2993 fib6_rules_cleanup();
2994 xfrm6_init:
2995 xfrm6_fini();
2996 out_fib6_init:
2997 fib6_gc_cleanup();
2998 out_register_subsys:
2999 unregister_pernet_subsys(&ip6_route_net_ops);
3000 out_dst_entries:
3001 dst_entries_destroy(&ip6_dst_blackhole_ops);
3002 out_kmem_cache:
3003 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3004 goto out;
3005 }
3006
3007 void ip6_route_cleanup(void)
3008 {
3009 unregister_netdevice_notifier(&ip6_route_dev_notifier);
3010 fib6_rules_cleanup();
3011 xfrm6_fini();
3012 fib6_gc_cleanup();
3013 unregister_pernet_subsys(&ip6_route_net_ops);
3014 dst_entries_destroy(&ip6_dst_blackhole_ops);
3015 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
3016 }