2 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
24 * Fixed routing subtrees.
27 #define pr_fmt(fmt) "IPv6: " fmt
29 #include <linux/capability.h>
30 #include <linux/errno.h>
31 #include <linux/export.h>
32 #include <linux/types.h>
33 #include <linux/times.h>
34 #include <linux/socket.h>
35 #include <linux/sockios.h>
36 #include <linux/net.h>
37 #include <linux/route.h>
38 #include <linux/netdevice.h>
39 #include <linux/in6.h>
40 #include <linux/mroute6.h>
41 #include <linux/init.h>
42 #include <linux/if_arp.h>
43 #include <linux/proc_fs.h>
44 #include <linux/seq_file.h>
45 #include <linux/nsproxy.h>
46 #include <linux/slab.h>
47 #include <net/net_namespace.h>
50 #include <net/ip6_fib.h>
51 #include <net/ip6_route.h>
52 #include <net/ndisc.h>
53 #include <net/addrconf.h>
55 #include <linux/rtnetlink.h>
58 #include <net/netevent.h>
59 #include <net/netlink.h>
60 #include <net/nexthop.h>
62 #include <asm/uaccess.h>
65 #include <linux/sysctl.h>
69 RT6_NUD_FAIL_HARD
= -3,
70 RT6_NUD_FAIL_PROBE
= -2,
71 RT6_NUD_FAIL_DO_RR
= -1,
75 static void ip6_rt_copy_init(struct rt6_info
*rt
, struct rt6_info
*ort
);
76 static struct dst_entry
*ip6_dst_check(struct dst_entry
*dst
, u32 cookie
);
77 static unsigned int ip6_default_advmss(const struct dst_entry
*dst
);
78 static unsigned int ip6_mtu(const struct dst_entry
*dst
);
79 static struct dst_entry
*ip6_negative_advice(struct dst_entry
*);
80 static void ip6_dst_destroy(struct dst_entry
*);
81 static void ip6_dst_ifdown(struct dst_entry
*,
82 struct net_device
*dev
, int how
);
83 static int ip6_dst_gc(struct dst_ops
*ops
);
85 static int ip6_pkt_discard(struct sk_buff
*skb
);
86 static int ip6_pkt_discard_out(struct sock
*sk
, struct sk_buff
*skb
);
87 static int ip6_pkt_prohibit(struct sk_buff
*skb
);
88 static int ip6_pkt_prohibit_out(struct sock
*sk
, struct sk_buff
*skb
);
89 static void ip6_link_failure(struct sk_buff
*skb
);
90 static void ip6_rt_update_pmtu(struct dst_entry
*dst
, struct sock
*sk
,
91 struct sk_buff
*skb
, u32 mtu
);
92 static void rt6_do_redirect(struct dst_entry
*dst
, struct sock
*sk
,
94 static void rt6_dst_from_metrics_check(struct rt6_info
*rt
);
95 static int rt6_score_route(struct rt6_info
*rt
, int oif
, int strict
);
97 #ifdef CONFIG_IPV6_ROUTE_INFO
98 static struct rt6_info
*rt6_add_route_info(struct net
*net
,
99 const struct in6_addr
*prefix
, int prefixlen
,
100 const struct in6_addr
*gwaddr
, int ifindex
,
102 static struct rt6_info
*rt6_get_route_info(struct net
*net
,
103 const struct in6_addr
*prefix
, int prefixlen
,
104 const struct in6_addr
*gwaddr
, int ifindex
);
107 struct uncached_list
{
109 struct list_head head
;
112 static DEFINE_PER_CPU_ALIGNED(struct uncached_list
, rt6_uncached_list
);
114 static void rt6_uncached_list_add(struct rt6_info
*rt
)
116 struct uncached_list
*ul
= raw_cpu_ptr(&rt6_uncached_list
);
118 rt
->dst
.flags
|= DST_NOCACHE
;
119 rt
->rt6i_uncached_list
= ul
;
121 spin_lock_bh(&ul
->lock
);
122 list_add_tail(&rt
->rt6i_uncached
, &ul
->head
);
123 spin_unlock_bh(&ul
->lock
);
126 static void rt6_uncached_list_del(struct rt6_info
*rt
)
128 if (!list_empty(&rt
->rt6i_uncached
)) {
129 struct uncached_list
*ul
= rt
->rt6i_uncached_list
;
131 spin_lock_bh(&ul
->lock
);
132 list_del(&rt
->rt6i_uncached
);
133 spin_unlock_bh(&ul
->lock
);
137 static void rt6_uncached_list_flush_dev(struct net
*net
, struct net_device
*dev
)
139 struct net_device
*loopback_dev
= net
->loopback_dev
;
142 for_each_possible_cpu(cpu
) {
143 struct uncached_list
*ul
= per_cpu_ptr(&rt6_uncached_list
, cpu
);
146 spin_lock_bh(&ul
->lock
);
147 list_for_each_entry(rt
, &ul
->head
, rt6i_uncached
) {
148 struct inet6_dev
*rt_idev
= rt
->rt6i_idev
;
149 struct net_device
*rt_dev
= rt
->dst
.dev
;
151 if (rt_idev
&& (rt_idev
->dev
== dev
|| !dev
) &&
152 rt_idev
->dev
!= loopback_dev
) {
153 rt
->rt6i_idev
= in6_dev_get(loopback_dev
);
154 in6_dev_put(rt_idev
);
157 if (rt_dev
&& (rt_dev
== dev
|| !dev
) &&
158 rt_dev
!= loopback_dev
) {
159 rt
->dst
.dev
= loopback_dev
;
160 dev_hold(rt
->dst
.dev
);
164 spin_unlock_bh(&ul
->lock
);
168 static u32
*rt6_pcpu_cow_metrics(struct rt6_info
*rt
)
170 return dst_metrics_write_ptr(rt
->dst
.from
);
173 static u32
*ipv6_cow_metrics(struct dst_entry
*dst
, unsigned long old
)
175 struct rt6_info
*rt
= (struct rt6_info
*)dst
;
177 if (rt
->rt6i_flags
& RTF_PCPU
)
178 return rt6_pcpu_cow_metrics(rt
);
179 else if (rt
->rt6i_flags
& RTF_CACHE
)
182 return dst_cow_metrics_generic(dst
, old
);
185 static inline const void *choose_neigh_daddr(struct rt6_info
*rt
,
189 struct in6_addr
*p
= &rt
->rt6i_gateway
;
191 if (!ipv6_addr_any(p
))
192 return (const void *) p
;
194 return &ipv6_hdr(skb
)->daddr
;
198 static struct neighbour
*ip6_neigh_lookup(const struct dst_entry
*dst
,
202 struct rt6_info
*rt
= (struct rt6_info
*) dst
;
205 daddr
= choose_neigh_daddr(rt
, skb
, daddr
);
206 n
= __ipv6_neigh_lookup(dst
->dev
, daddr
);
209 return neigh_create(&nd_tbl
, daddr
, dst
->dev
);
212 static struct dst_ops ip6_dst_ops_template
= {
216 .check
= ip6_dst_check
,
217 .default_advmss
= ip6_default_advmss
,
219 .cow_metrics
= ipv6_cow_metrics
,
220 .destroy
= ip6_dst_destroy
,
221 .ifdown
= ip6_dst_ifdown
,
222 .negative_advice
= ip6_negative_advice
,
223 .link_failure
= ip6_link_failure
,
224 .update_pmtu
= ip6_rt_update_pmtu
,
225 .redirect
= rt6_do_redirect
,
226 .local_out
= __ip6_local_out
,
227 .neigh_lookup
= ip6_neigh_lookup
,
230 static unsigned int ip6_blackhole_mtu(const struct dst_entry
*dst
)
232 unsigned int mtu
= dst_metric_raw(dst
, RTAX_MTU
);
234 return mtu
? : dst
->dev
->mtu
;
237 static void ip6_rt_blackhole_update_pmtu(struct dst_entry
*dst
, struct sock
*sk
,
238 struct sk_buff
*skb
, u32 mtu
)
242 static void ip6_rt_blackhole_redirect(struct dst_entry
*dst
, struct sock
*sk
,
247 static u32
*ip6_rt_blackhole_cow_metrics(struct dst_entry
*dst
,
253 static struct dst_ops ip6_dst_blackhole_ops
= {
255 .destroy
= ip6_dst_destroy
,
256 .check
= ip6_dst_check
,
257 .mtu
= ip6_blackhole_mtu
,
258 .default_advmss
= ip6_default_advmss
,
259 .update_pmtu
= ip6_rt_blackhole_update_pmtu
,
260 .redirect
= ip6_rt_blackhole_redirect
,
261 .cow_metrics
= ip6_rt_blackhole_cow_metrics
,
262 .neigh_lookup
= ip6_neigh_lookup
,
265 static const u32 ip6_template_metrics
[RTAX_MAX
] = {
266 [RTAX_HOPLIMIT
- 1] = 0,
269 static const struct rt6_info ip6_null_entry_template
= {
271 .__refcnt
= ATOMIC_INIT(1),
273 .obsolete
= DST_OBSOLETE_FORCE_CHK
,
274 .error
= -ENETUNREACH
,
275 .input
= ip6_pkt_discard
,
276 .output
= ip6_pkt_discard_out
,
278 .rt6i_flags
= (RTF_REJECT
| RTF_NONEXTHOP
),
279 .rt6i_protocol
= RTPROT_KERNEL
,
280 .rt6i_metric
= ~(u32
) 0,
281 .rt6i_ref
= ATOMIC_INIT(1),
284 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
286 static const struct rt6_info ip6_prohibit_entry_template
= {
288 .__refcnt
= ATOMIC_INIT(1),
290 .obsolete
= DST_OBSOLETE_FORCE_CHK
,
292 .input
= ip6_pkt_prohibit
,
293 .output
= ip6_pkt_prohibit_out
,
295 .rt6i_flags
= (RTF_REJECT
| RTF_NONEXTHOP
),
296 .rt6i_protocol
= RTPROT_KERNEL
,
297 .rt6i_metric
= ~(u32
) 0,
298 .rt6i_ref
= ATOMIC_INIT(1),
301 static const struct rt6_info ip6_blk_hole_entry_template
= {
303 .__refcnt
= ATOMIC_INIT(1),
305 .obsolete
= DST_OBSOLETE_FORCE_CHK
,
307 .input
= dst_discard
,
308 .output
= dst_discard_sk
,
310 .rt6i_flags
= (RTF_REJECT
| RTF_NONEXTHOP
),
311 .rt6i_protocol
= RTPROT_KERNEL
,
312 .rt6i_metric
= ~(u32
) 0,
313 .rt6i_ref
= ATOMIC_INIT(1),
318 /* allocate dst with ip6_dst_ops */
319 static struct rt6_info
*__ip6_dst_alloc(struct net
*net
,
320 struct net_device
*dev
,
322 struct fib6_table
*table
)
324 struct rt6_info
*rt
= dst_alloc(&net
->ipv6
.ip6_dst_ops
, dev
,
325 0, DST_OBSOLETE_FORCE_CHK
, flags
);
328 struct dst_entry
*dst
= &rt
->dst
;
330 memset(dst
+ 1, 0, sizeof(*rt
) - sizeof(*dst
));
331 INIT_LIST_HEAD(&rt
->rt6i_siblings
);
332 INIT_LIST_HEAD(&rt
->rt6i_uncached
);
337 static struct rt6_info
*ip6_dst_alloc(struct net
*net
,
338 struct net_device
*dev
,
340 struct fib6_table
*table
)
342 struct rt6_info
*rt
= __ip6_dst_alloc(net
, dev
, flags
, table
);
345 rt
->rt6i_pcpu
= alloc_percpu_gfp(struct rt6_info
*, GFP_ATOMIC
);
349 for_each_possible_cpu(cpu
) {
352 p
= per_cpu_ptr(rt
->rt6i_pcpu
, cpu
);
353 /* no one shares rt */
357 dst_destroy((struct dst_entry
*)rt
);
365 static void ip6_dst_destroy(struct dst_entry
*dst
)
367 struct rt6_info
*rt
= (struct rt6_info
*)dst
;
368 struct dst_entry
*from
= dst
->from
;
369 struct inet6_dev
*idev
;
371 dst_destroy_metrics_generic(dst
);
372 free_percpu(rt
->rt6i_pcpu
);
373 rt6_uncached_list_del(rt
);
375 idev
= rt
->rt6i_idev
;
377 rt
->rt6i_idev
= NULL
;
385 static void ip6_dst_ifdown(struct dst_entry
*dst
, struct net_device
*dev
,
388 struct rt6_info
*rt
= (struct rt6_info
*)dst
;
389 struct inet6_dev
*idev
= rt
->rt6i_idev
;
390 struct net_device
*loopback_dev
=
391 dev_net(dev
)->loopback_dev
;
393 if (dev
!= loopback_dev
) {
394 if (idev
&& idev
->dev
== dev
) {
395 struct inet6_dev
*loopback_idev
=
396 in6_dev_get(loopback_dev
);
398 rt
->rt6i_idev
= loopback_idev
;
405 static bool rt6_check_expired(const struct rt6_info
*rt
)
407 if (rt
->rt6i_flags
& RTF_EXPIRES
) {
408 if (time_after(jiffies
, rt
->dst
.expires
))
410 } else if (rt
->dst
.from
) {
411 return rt6_check_expired((struct rt6_info
*) rt
->dst
.from
);
416 /* Multipath route selection:
417 * Hash based function using packet header and flowlabel.
418 * Adapted from fib_info_hashfn()
420 static int rt6_info_hash_nhsfn(unsigned int candidate_count
,
421 const struct flowi6
*fl6
)
423 unsigned int val
= fl6
->flowi6_proto
;
425 val
^= ipv6_addr_hash(&fl6
->daddr
);
426 val
^= ipv6_addr_hash(&fl6
->saddr
);
428 /* Work only if this not encapsulated */
429 switch (fl6
->flowi6_proto
) {
433 val
^= (__force u16
)fl6
->fl6_sport
;
434 val
^= (__force u16
)fl6
->fl6_dport
;
438 val
^= (__force u16
)fl6
->fl6_icmp_type
;
439 val
^= (__force u16
)fl6
->fl6_icmp_code
;
442 /* RFC6438 recommands to use flowlabel */
443 val
^= (__force u32
)fl6
->flowlabel
;
445 /* Perhaps, we need to tune, this function? */
446 val
= val
^ (val
>> 7) ^ (val
>> 12);
447 return val
% candidate_count
;
450 static struct rt6_info
*rt6_multipath_select(struct rt6_info
*match
,
451 struct flowi6
*fl6
, int oif
,
454 struct rt6_info
*sibling
, *next_sibling
;
457 route_choosen
= rt6_info_hash_nhsfn(match
->rt6i_nsiblings
+ 1, fl6
);
458 /* Don't change the route, if route_choosen == 0
459 * (siblings does not include ourself)
462 list_for_each_entry_safe(sibling
, next_sibling
,
463 &match
->rt6i_siblings
, rt6i_siblings
) {
465 if (route_choosen
== 0) {
466 if (rt6_score_route(sibling
, oif
, strict
) < 0)
476 * Route lookup. Any table->tb6_lock is implied.
479 static inline struct rt6_info
*rt6_device_match(struct net
*net
,
481 const struct in6_addr
*saddr
,
485 struct rt6_info
*local
= NULL
;
486 struct rt6_info
*sprt
;
488 if (!oif
&& ipv6_addr_any(saddr
))
491 for (sprt
= rt
; sprt
; sprt
= sprt
->dst
.rt6_next
) {
492 struct net_device
*dev
= sprt
->dst
.dev
;
495 if (dev
->ifindex
== oif
)
497 if (dev
->flags
& IFF_LOOPBACK
) {
498 if (!sprt
->rt6i_idev
||
499 sprt
->rt6i_idev
->dev
->ifindex
!= oif
) {
500 if (flags
& RT6_LOOKUP_F_IFACE
&& oif
)
502 if (local
&& (!oif
||
503 local
->rt6i_idev
->dev
->ifindex
== oif
))
509 if (ipv6_chk_addr(net
, saddr
, dev
,
510 flags
& RT6_LOOKUP_F_IFACE
))
519 if (flags
& RT6_LOOKUP_F_IFACE
)
520 return net
->ipv6
.ip6_null_entry
;
526 #ifdef CONFIG_IPV6_ROUTER_PREF
527 struct __rt6_probe_work
{
528 struct work_struct work
;
529 struct in6_addr target
;
530 struct net_device
*dev
;
533 static void rt6_probe_deferred(struct work_struct
*w
)
535 struct in6_addr mcaddr
;
536 struct __rt6_probe_work
*work
=
537 container_of(w
, struct __rt6_probe_work
, work
);
539 addrconf_addr_solict_mult(&work
->target
, &mcaddr
);
540 ndisc_send_ns(work
->dev
, NULL
, &work
->target
, &mcaddr
, NULL
);
545 static void rt6_probe(struct rt6_info
*rt
)
547 struct neighbour
*neigh
;
549 * Okay, this does not seem to be appropriate
550 * for now, however, we need to check if it
551 * is really so; aka Router Reachability Probing.
553 * Router Reachability Probe MUST be rate-limited
554 * to no more than one per minute.
556 if (!rt
|| !(rt
->rt6i_flags
& RTF_GATEWAY
))
559 neigh
= __ipv6_neigh_lookup_noref(rt
->dst
.dev
, &rt
->rt6i_gateway
);
561 write_lock(&neigh
->lock
);
562 if (neigh
->nud_state
& NUD_VALID
)
567 time_after(jiffies
, neigh
->updated
+ rt
->rt6i_idev
->cnf
.rtr_probe_interval
)) {
568 struct __rt6_probe_work
*work
;
570 work
= kmalloc(sizeof(*work
), GFP_ATOMIC
);
573 __neigh_set_probe_once(neigh
);
576 write_unlock(&neigh
->lock
);
579 INIT_WORK(&work
->work
, rt6_probe_deferred
);
580 work
->target
= rt
->rt6i_gateway
;
581 dev_hold(rt
->dst
.dev
);
582 work
->dev
= rt
->dst
.dev
;
583 schedule_work(&work
->work
);
587 write_unlock(&neigh
->lock
);
589 rcu_read_unlock_bh();
592 static inline void rt6_probe(struct rt6_info
*rt
)
598 * Default Router Selection (RFC 2461 6.3.6)
600 static inline int rt6_check_dev(struct rt6_info
*rt
, int oif
)
602 struct net_device
*dev
= rt
->dst
.dev
;
603 if (!oif
|| dev
->ifindex
== oif
)
605 if ((dev
->flags
& IFF_LOOPBACK
) &&
606 rt
->rt6i_idev
&& rt
->rt6i_idev
->dev
->ifindex
== oif
)
611 static inline enum rt6_nud_state
rt6_check_neigh(struct rt6_info
*rt
)
613 struct neighbour
*neigh
;
614 enum rt6_nud_state ret
= RT6_NUD_FAIL_HARD
;
616 if (rt
->rt6i_flags
& RTF_NONEXTHOP
||
617 !(rt
->rt6i_flags
& RTF_GATEWAY
))
618 return RT6_NUD_SUCCEED
;
621 neigh
= __ipv6_neigh_lookup_noref(rt
->dst
.dev
, &rt
->rt6i_gateway
);
623 read_lock(&neigh
->lock
);
624 if (neigh
->nud_state
& NUD_VALID
)
625 ret
= RT6_NUD_SUCCEED
;
626 #ifdef CONFIG_IPV6_ROUTER_PREF
627 else if (!(neigh
->nud_state
& NUD_FAILED
))
628 ret
= RT6_NUD_SUCCEED
;
630 ret
= RT6_NUD_FAIL_PROBE
;
632 read_unlock(&neigh
->lock
);
634 ret
= IS_ENABLED(CONFIG_IPV6_ROUTER_PREF
) ?
635 RT6_NUD_SUCCEED
: RT6_NUD_FAIL_DO_RR
;
637 rcu_read_unlock_bh();
642 static int rt6_score_route(struct rt6_info
*rt
, int oif
,
647 m
= rt6_check_dev(rt
, oif
);
648 if (!m
&& (strict
& RT6_LOOKUP_F_IFACE
))
649 return RT6_NUD_FAIL_HARD
;
650 #ifdef CONFIG_IPV6_ROUTER_PREF
651 m
|= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt
->rt6i_flags
)) << 2;
653 if (strict
& RT6_LOOKUP_F_REACHABLE
) {
654 int n
= rt6_check_neigh(rt
);
661 static struct rt6_info
*find_match(struct rt6_info
*rt
, int oif
, int strict
,
662 int *mpri
, struct rt6_info
*match
,
666 bool match_do_rr
= false;
668 if (rt6_check_expired(rt
))
671 m
= rt6_score_route(rt
, oif
, strict
);
672 if (m
== RT6_NUD_FAIL_DO_RR
) {
674 m
= 0; /* lowest valid score */
675 } else if (m
== RT6_NUD_FAIL_HARD
) {
679 if (strict
& RT6_LOOKUP_F_REACHABLE
)
682 /* note that m can be RT6_NUD_FAIL_PROBE at this point */
684 *do_rr
= match_do_rr
;
692 static struct rt6_info
*find_rr_leaf(struct fib6_node
*fn
,
693 struct rt6_info
*rr_head
,
694 u32 metric
, int oif
, int strict
,
697 struct rt6_info
*rt
, *match
, *cont
;
702 for (rt
= rr_head
; rt
; rt
= rt
->dst
.rt6_next
) {
703 if (rt
->rt6i_metric
!= metric
) {
708 match
= find_match(rt
, oif
, strict
, &mpri
, match
, do_rr
);
711 for (rt
= fn
->leaf
; rt
&& rt
!= rr_head
; rt
= rt
->dst
.rt6_next
) {
712 if (rt
->rt6i_metric
!= metric
) {
717 match
= find_match(rt
, oif
, strict
, &mpri
, match
, do_rr
);
723 for (rt
= cont
; rt
; rt
= rt
->dst
.rt6_next
)
724 match
= find_match(rt
, oif
, strict
, &mpri
, match
, do_rr
);
729 static struct rt6_info
*rt6_select(struct fib6_node
*fn
, int oif
, int strict
)
731 struct rt6_info
*match
, *rt0
;
737 fn
->rr_ptr
= rt0
= fn
->leaf
;
739 match
= find_rr_leaf(fn
, rt0
, rt0
->rt6i_metric
, oif
, strict
,
743 struct rt6_info
*next
= rt0
->dst
.rt6_next
;
745 /* no entries matched; do round-robin */
746 if (!next
|| next
->rt6i_metric
!= rt0
->rt6i_metric
)
753 net
= dev_net(rt0
->dst
.dev
);
754 return match
? match
: net
->ipv6
.ip6_null_entry
;
757 static bool rt6_is_gw_or_nonexthop(const struct rt6_info
*rt
)
759 return (rt
->rt6i_flags
& (RTF_NONEXTHOP
| RTF_GATEWAY
));
762 #ifdef CONFIG_IPV6_ROUTE_INFO
763 int rt6_route_rcv(struct net_device
*dev
, u8
*opt
, int len
,
764 const struct in6_addr
*gwaddr
)
766 struct net
*net
= dev_net(dev
);
767 struct route_info
*rinfo
= (struct route_info
*) opt
;
768 struct in6_addr prefix_buf
, *prefix
;
770 unsigned long lifetime
;
773 if (len
< sizeof(struct route_info
)) {
777 /* Sanity check for prefix_len and length */
778 if (rinfo
->length
> 3) {
780 } else if (rinfo
->prefix_len
> 128) {
782 } else if (rinfo
->prefix_len
> 64) {
783 if (rinfo
->length
< 2) {
786 } else if (rinfo
->prefix_len
> 0) {
787 if (rinfo
->length
< 1) {
792 pref
= rinfo
->route_pref
;
793 if (pref
== ICMPV6_ROUTER_PREF_INVALID
)
796 lifetime
= addrconf_timeout_fixup(ntohl(rinfo
->lifetime
), HZ
);
798 if (rinfo
->length
== 3)
799 prefix
= (struct in6_addr
*)rinfo
->prefix
;
801 /* this function is safe */
802 ipv6_addr_prefix(&prefix_buf
,
803 (struct in6_addr
*)rinfo
->prefix
,
805 prefix
= &prefix_buf
;
808 if (rinfo
->prefix_len
== 0)
809 rt
= rt6_get_dflt_router(gwaddr
, dev
);
811 rt
= rt6_get_route_info(net
, prefix
, rinfo
->prefix_len
,
812 gwaddr
, dev
->ifindex
);
814 if (rt
&& !lifetime
) {
820 rt
= rt6_add_route_info(net
, prefix
, rinfo
->prefix_len
, gwaddr
, dev
->ifindex
,
823 rt
->rt6i_flags
= RTF_ROUTEINFO
|
824 (rt
->rt6i_flags
& ~RTF_PREF_MASK
) | RTF_PREF(pref
);
827 if (!addrconf_finite_timeout(lifetime
))
828 rt6_clean_expires(rt
);
830 rt6_set_expires(rt
, jiffies
+ HZ
* lifetime
);
838 static struct fib6_node
* fib6_backtrack(struct fib6_node
*fn
,
839 struct in6_addr
*saddr
)
841 struct fib6_node
*pn
;
843 if (fn
->fn_flags
& RTN_TL_ROOT
)
846 if (FIB6_SUBTREE(pn
) && FIB6_SUBTREE(pn
) != fn
)
847 fn
= fib6_lookup(FIB6_SUBTREE(pn
), NULL
, saddr
);
850 if (fn
->fn_flags
& RTN_RTINFO
)
855 static struct rt6_info
*ip6_pol_route_lookup(struct net
*net
,
856 struct fib6_table
*table
,
857 struct flowi6
*fl6
, int flags
)
859 struct fib6_node
*fn
;
862 read_lock_bh(&table
->tb6_lock
);
863 fn
= fib6_lookup(&table
->tb6_root
, &fl6
->daddr
, &fl6
->saddr
);
866 rt
= rt6_device_match(net
, rt
, &fl6
->saddr
, fl6
->flowi6_oif
, flags
);
867 if (rt
->rt6i_nsiblings
&& fl6
->flowi6_oif
== 0)
868 rt
= rt6_multipath_select(rt
, fl6
, fl6
->flowi6_oif
, flags
);
869 if (rt
== net
->ipv6
.ip6_null_entry
) {
870 fn
= fib6_backtrack(fn
, &fl6
->saddr
);
874 dst_use(&rt
->dst
, jiffies
);
875 read_unlock_bh(&table
->tb6_lock
);
880 struct dst_entry
*ip6_route_lookup(struct net
*net
, struct flowi6
*fl6
,
883 return fib6_rule_lookup(net
, fl6
, flags
, ip6_pol_route_lookup
);
885 EXPORT_SYMBOL_GPL(ip6_route_lookup
);
887 struct rt6_info
*rt6_lookup(struct net
*net
, const struct in6_addr
*daddr
,
888 const struct in6_addr
*saddr
, int oif
, int strict
)
890 struct flowi6 fl6
= {
894 struct dst_entry
*dst
;
895 int flags
= strict
? RT6_LOOKUP_F_IFACE
: 0;
898 memcpy(&fl6
.saddr
, saddr
, sizeof(*saddr
));
899 flags
|= RT6_LOOKUP_F_HAS_SADDR
;
902 dst
= fib6_rule_lookup(net
, &fl6
, flags
, ip6_pol_route_lookup
);
904 return (struct rt6_info
*) dst
;
910 EXPORT_SYMBOL(rt6_lookup
);
/* ip6_ins_rt is called with the table->tb6_lock FREE (not held).
   It takes a new route entry; if the addition fails for any reason, the
   route is freed. In any case, if the caller does not hold a reference, it may
918 static int __ip6_ins_rt(struct rt6_info
*rt
, struct nl_info
*info
,
919 struct mx6_config
*mxc
)
922 struct fib6_table
*table
;
924 table
= rt
->rt6i_table
;
925 write_lock_bh(&table
->tb6_lock
);
926 err
= fib6_add(&table
->tb6_root
, rt
, info
, mxc
);
927 write_unlock_bh(&table
->tb6_lock
);
932 int ip6_ins_rt(struct rt6_info
*rt
)
934 struct nl_info info
= { .nl_net
= dev_net(rt
->dst
.dev
), };
935 struct mx6_config mxc
= { .mx
= NULL
, };
937 return __ip6_ins_rt(rt
, &info
, &mxc
);
940 static struct rt6_info
*ip6_rt_cache_alloc(struct rt6_info
*ort
,
941 const struct in6_addr
*daddr
,
942 const struct in6_addr
*saddr
)
950 if (ort
->rt6i_flags
& (RTF_CACHE
| RTF_PCPU
))
951 ort
= (struct rt6_info
*)ort
->dst
.from
;
953 rt
= __ip6_dst_alloc(dev_net(ort
->dst
.dev
), ort
->dst
.dev
,
959 ip6_rt_copy_init(rt
, ort
);
960 rt
->rt6i_flags
|= RTF_CACHE
;
962 rt
->dst
.flags
|= DST_HOST
;
963 rt
->rt6i_dst
.addr
= *daddr
;
964 rt
->rt6i_dst
.plen
= 128;
966 if (!rt6_is_gw_or_nonexthop(ort
)) {
967 if (ort
->rt6i_dst
.plen
!= 128 &&
968 ipv6_addr_equal(&ort
->rt6i_dst
.addr
, daddr
))
969 rt
->rt6i_flags
|= RTF_ANYCAST
;
970 #ifdef CONFIG_IPV6_SUBTREES
971 if (rt
->rt6i_src
.plen
&& saddr
) {
972 rt
->rt6i_src
.addr
= *saddr
;
973 rt
->rt6i_src
.plen
= 128;
981 static struct rt6_info
*ip6_rt_pcpu_alloc(struct rt6_info
*rt
)
983 struct rt6_info
*pcpu_rt
;
985 pcpu_rt
= __ip6_dst_alloc(dev_net(rt
->dst
.dev
),
986 rt
->dst
.dev
, rt
->dst
.flags
,
991 ip6_rt_copy_init(pcpu_rt
, rt
);
992 pcpu_rt
->rt6i_protocol
= rt
->rt6i_protocol
;
993 pcpu_rt
->rt6i_flags
|= RTF_PCPU
;
997 /* It should be called with read_lock_bh(&tb6_lock) acquired */
998 static struct rt6_info
*rt6_get_pcpu_route(struct rt6_info
*rt
)
1000 struct rt6_info
*pcpu_rt
, *prev
, **p
;
1002 p
= this_cpu_ptr(rt
->rt6i_pcpu
);
1008 pcpu_rt
= ip6_rt_pcpu_alloc(rt
);
1010 struct net
*net
= dev_net(rt
->dst
.dev
);
1012 pcpu_rt
= net
->ipv6
.ip6_null_entry
;
1016 prev
= cmpxchg(p
, NULL
, pcpu_rt
);
1018 /* If someone did it before us, return prev instead */
1019 dst_destroy(&pcpu_rt
->dst
);
1024 dst_hold(&pcpu_rt
->dst
);
1025 rt6_dst_from_metrics_check(pcpu_rt
);
1029 static struct rt6_info
*ip6_pol_route(struct net
*net
, struct fib6_table
*table
, int oif
,
1030 struct flowi6
*fl6
, int flags
)
1032 struct fib6_node
*fn
, *saved_fn
;
1033 struct rt6_info
*rt
;
1036 strict
|= flags
& RT6_LOOKUP_F_IFACE
;
1037 if (net
->ipv6
.devconf_all
->forwarding
== 0)
1038 strict
|= RT6_LOOKUP_F_REACHABLE
;
1040 read_lock_bh(&table
->tb6_lock
);
1042 fn
= fib6_lookup(&table
->tb6_root
, &fl6
->daddr
, &fl6
->saddr
);
1046 rt
= rt6_select(fn
, oif
, strict
);
1047 if (rt
->rt6i_nsiblings
)
1048 rt
= rt6_multipath_select(rt
, fl6
, oif
, strict
);
1049 if (rt
== net
->ipv6
.ip6_null_entry
) {
1050 fn
= fib6_backtrack(fn
, &fl6
->saddr
);
1052 goto redo_rt6_select
;
1053 else if (strict
& RT6_LOOKUP_F_REACHABLE
) {
1054 /* also consider unreachable route */
1055 strict
&= ~RT6_LOOKUP_F_REACHABLE
;
1057 goto redo_rt6_select
;
1062 if (rt
== net
->ipv6
.ip6_null_entry
|| (rt
->rt6i_flags
& RTF_CACHE
)) {
1063 dst_use(&rt
->dst
, jiffies
);
1064 read_unlock_bh(&table
->tb6_lock
);
1066 rt6_dst_from_metrics_check(rt
);
1068 } else if (unlikely((fl6
->flowi6_flags
& FLOWI_FLAG_KNOWN_NH
) &&
1069 !(rt
->rt6i_flags
& RTF_GATEWAY
))) {
1070 /* Create a RTF_CACHE clone which will not be
1071 * owned by the fib6 tree. It is for the special case where
1072 * the daddr in the skb during the neighbor look-up is different
1073 * from the fl6->daddr used to look-up route here.
1076 struct rt6_info
*uncached_rt
;
1078 dst_use(&rt
->dst
, jiffies
);
1079 read_unlock_bh(&table
->tb6_lock
);
1081 uncached_rt
= ip6_rt_cache_alloc(rt
, &fl6
->daddr
, NULL
);
1082 dst_release(&rt
->dst
);
1085 rt6_uncached_list_add(uncached_rt
);
1087 uncached_rt
= net
->ipv6
.ip6_null_entry
;
1089 dst_hold(&uncached_rt
->dst
);
1093 /* Get a percpu copy */
1095 struct rt6_info
*pcpu_rt
;
1097 rt
->dst
.lastuse
= jiffies
;
1099 pcpu_rt
= rt6_get_pcpu_route(rt
);
1100 read_unlock_bh(&table
->tb6_lock
);
1106 static struct rt6_info
*ip6_pol_route_input(struct net
*net
, struct fib6_table
*table
,
1107 struct flowi6
*fl6
, int flags
)
1109 return ip6_pol_route(net
, table
, fl6
->flowi6_iif
, fl6
, flags
);
1112 static struct dst_entry
*ip6_route_input_lookup(struct net
*net
,
1113 struct net_device
*dev
,
1114 struct flowi6
*fl6
, int flags
)
1116 if (rt6_need_strict(&fl6
->daddr
) && dev
->type
!= ARPHRD_PIMREG
)
1117 flags
|= RT6_LOOKUP_F_IFACE
;
1119 return fib6_rule_lookup(net
, fl6
, flags
, ip6_pol_route_input
);
1122 void ip6_route_input(struct sk_buff
*skb
)
1124 const struct ipv6hdr
*iph
= ipv6_hdr(skb
);
1125 struct net
*net
= dev_net(skb
->dev
);
1126 int flags
= RT6_LOOKUP_F_HAS_SADDR
;
1127 struct flowi6 fl6
= {
1128 .flowi6_iif
= skb
->dev
->ifindex
,
1129 .daddr
= iph
->daddr
,
1130 .saddr
= iph
->saddr
,
1131 .flowlabel
= ip6_flowinfo(iph
),
1132 .flowi6_mark
= skb
->mark
,
1133 .flowi6_proto
= iph
->nexthdr
,
1136 skb_dst_set(skb
, ip6_route_input_lookup(net
, skb
->dev
, &fl6
, flags
));
1139 static struct rt6_info
*ip6_pol_route_output(struct net
*net
, struct fib6_table
*table
,
1140 struct flowi6
*fl6
, int flags
)
1142 return ip6_pol_route(net
, table
, fl6
->flowi6_oif
, fl6
, flags
);
1145 struct dst_entry
*ip6_route_output(struct net
*net
, const struct sock
*sk
,
1150 fl6
->flowi6_iif
= LOOPBACK_IFINDEX
;
1152 if ((sk
&& sk
->sk_bound_dev_if
) || rt6_need_strict(&fl6
->daddr
))
1153 flags
|= RT6_LOOKUP_F_IFACE
;
1155 if (!ipv6_addr_any(&fl6
->saddr
))
1156 flags
|= RT6_LOOKUP_F_HAS_SADDR
;
1158 flags
|= rt6_srcprefs2flags(inet6_sk(sk
)->srcprefs
);
1160 return fib6_rule_lookup(net
, fl6
, flags
, ip6_pol_route_output
);
1162 EXPORT_SYMBOL(ip6_route_output
);
1164 struct dst_entry
*ip6_blackhole_route(struct net
*net
, struct dst_entry
*dst_orig
)
1166 struct rt6_info
*rt
, *ort
= (struct rt6_info
*) dst_orig
;
1167 struct dst_entry
*new = NULL
;
1169 rt
= dst_alloc(&ip6_dst_blackhole_ops
, ort
->dst
.dev
, 1, DST_OBSOLETE_NONE
, 0);
1173 memset(new + 1, 0, sizeof(*rt
) - sizeof(*new));
1176 new->input
= dst_discard
;
1177 new->output
= dst_discard_sk
;
1179 if (dst_metrics_read_only(&ort
->dst
))
1180 new->_metrics
= ort
->dst
._metrics
;
1182 dst_copy_metrics(new, &ort
->dst
);
1183 rt
->rt6i_idev
= ort
->rt6i_idev
;
1185 in6_dev_hold(rt
->rt6i_idev
);
1187 rt
->rt6i_gateway
= ort
->rt6i_gateway
;
1188 rt
->rt6i_flags
= ort
->rt6i_flags
;
1189 rt
->rt6i_metric
= 0;
1191 memcpy(&rt
->rt6i_dst
, &ort
->rt6i_dst
, sizeof(struct rt6key
));
1192 #ifdef CONFIG_IPV6_SUBTREES
1193 memcpy(&rt
->rt6i_src
, &ort
->rt6i_src
, sizeof(struct rt6key
));
1199 dst_release(dst_orig
);
1200 return new ? new : ERR_PTR(-ENOMEM
);
1204 * Destination cache support functions
1207 static void rt6_dst_from_metrics_check(struct rt6_info
*rt
)
1210 dst_metrics_ptr(&rt
->dst
) != dst_metrics_ptr(rt
->dst
.from
))
1211 dst_init_metrics(&rt
->dst
, dst_metrics_ptr(rt
->dst
.from
), true);
1214 static struct dst_entry
*rt6_check(struct rt6_info
*rt
, u32 cookie
)
1216 if (!rt
->rt6i_node
|| (rt
->rt6i_node
->fn_sernum
!= cookie
))
1219 if (rt6_check_expired(rt
))
1225 static struct dst_entry
*rt6_dst_from_check(struct rt6_info
*rt
, u32 cookie
)
1227 if (rt
->dst
.obsolete
== DST_OBSOLETE_FORCE_CHK
&&
1228 rt6_check((struct rt6_info
*)(rt
->dst
.from
), cookie
))
1234 static struct dst_entry
*ip6_dst_check(struct dst_entry
*dst
, u32 cookie
)
1236 struct rt6_info
*rt
;
1238 rt
= (struct rt6_info
*) dst
;
1240 /* All IPV6 dsts are created with ->obsolete set to the value
1241 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1242 * into this function always.
1245 rt6_dst_from_metrics_check(rt
);
1247 if ((rt
->rt6i_flags
& RTF_PCPU
) || unlikely(dst
->flags
& DST_NOCACHE
))
1248 return rt6_dst_from_check(rt
, cookie
);
1250 return rt6_check(rt
, cookie
);
1253 static struct dst_entry
*ip6_negative_advice(struct dst_entry
*dst
)
1255 struct rt6_info
*rt
= (struct rt6_info
*) dst
;
1258 if (rt
->rt6i_flags
& RTF_CACHE
) {
1259 if (rt6_check_expired(rt
)) {
1271 static void ip6_link_failure(struct sk_buff
*skb
)
1273 struct rt6_info
*rt
;
1275 icmpv6_send(skb
, ICMPV6_DEST_UNREACH
, ICMPV6_ADDR_UNREACH
, 0);
1277 rt
= (struct rt6_info
*) skb_dst(skb
);
1279 if (rt
->rt6i_flags
& RTF_CACHE
) {
1283 } else if (rt
->rt6i_node
&& (rt
->rt6i_flags
& RTF_DEFAULT
)) {
1284 rt
->rt6i_node
->fn_sernum
= -1;
1289 static void rt6_do_update_pmtu(struct rt6_info
*rt
, u32 mtu
)
1291 struct net
*net
= dev_net(rt
->dst
.dev
);
1293 rt
->rt6i_flags
|= RTF_MODIFIED
;
1294 rt
->rt6i_pmtu
= mtu
;
1295 rt6_update_expires(rt
, net
->ipv6
.sysctl
.ip6_rt_mtu_expires
);
1298 static void __ip6_rt_update_pmtu(struct dst_entry
*dst
, const struct sock
*sk
,
1299 const struct ipv6hdr
*iph
, u32 mtu
)
1301 struct rt6_info
*rt6
= (struct rt6_info
*)dst
;
1303 if (rt6
->rt6i_flags
& RTF_LOCAL
)
1307 mtu
= max_t(u32
, mtu
, IPV6_MIN_MTU
);
1308 if (mtu
>= dst_mtu(dst
))
1311 if (rt6
->rt6i_flags
& RTF_CACHE
) {
1312 rt6_do_update_pmtu(rt6
, mtu
);
1314 const struct in6_addr
*daddr
, *saddr
;
1315 struct rt6_info
*nrt6
;
1318 daddr
= &iph
->daddr
;
1319 saddr
= &iph
->saddr
;
1321 daddr
= &sk
->sk_v6_daddr
;
1322 saddr
= &inet6_sk(sk
)->saddr
;
1326 nrt6
= ip6_rt_cache_alloc(rt6
, daddr
, saddr
);
1328 rt6_do_update_pmtu(nrt6
, mtu
);
1330 /* ip6_ins_rt(nrt6) will bump the
1331 * rt6->rt6i_node->fn_sernum
1332 * which will fail the next rt6_check() and
1333 * invalidate the sk->sk_dst_cache.
1340 static void ip6_rt_update_pmtu(struct dst_entry
*dst
, struct sock
*sk
,
1341 struct sk_buff
*skb
, u32 mtu
)
1343 __ip6_rt_update_pmtu(dst
, sk
, skb
? ipv6_hdr(skb
) : NULL
, mtu
);
1346 void ip6_update_pmtu(struct sk_buff
*skb
, struct net
*net
, __be32 mtu
,
1349 const struct ipv6hdr
*iph
= (struct ipv6hdr
*) skb
->data
;
1350 struct dst_entry
*dst
;
1353 memset(&fl6
, 0, sizeof(fl6
));
1354 fl6
.flowi6_oif
= oif
;
1355 fl6
.flowi6_mark
= mark
? mark
: IP6_REPLY_MARK(net
, skb
->mark
);
1356 fl6
.daddr
= iph
->daddr
;
1357 fl6
.saddr
= iph
->saddr
;
1358 fl6
.flowlabel
= ip6_flowinfo(iph
);
1360 dst
= ip6_route_output(net
, NULL
, &fl6
);
1362 __ip6_rt_update_pmtu(dst
, NULL
, iph
, ntohl(mtu
));
1365 EXPORT_SYMBOL_GPL(ip6_update_pmtu
);
1367 void ip6_sk_update_pmtu(struct sk_buff
*skb
, struct sock
*sk
, __be32 mtu
)
1369 ip6_update_pmtu(skb
, sock_net(sk
), mtu
,
1370 sk
->sk_bound_dev_if
, sk
->sk_mark
);
1372 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu
);
1374 /* Handle redirects */
1375 struct ip6rd_flowi
{
1377 struct in6_addr gateway
;
1380 static struct rt6_info
*__ip6_route_redirect(struct net
*net
,
1381 struct fib6_table
*table
,
1385 struct ip6rd_flowi
*rdfl
= (struct ip6rd_flowi
*)fl6
;
1386 struct rt6_info
*rt
;
1387 struct fib6_node
*fn
;
1389 /* Get the "current" route for this destination and
1390 * check if the redirect has come from approriate router.
1392 * RFC 4861 specifies that redirects should only be
1393 * accepted if they come from the nexthop to the target.
1394 * Due to the way the routes are chosen, this notion
1395 * is a bit fuzzy and one might need to check all possible
1399 read_lock_bh(&table
->tb6_lock
);
1400 fn
= fib6_lookup(&table
->tb6_root
, &fl6
->daddr
, &fl6
->saddr
);
1402 for (rt
= fn
->leaf
; rt
; rt
= rt
->dst
.rt6_next
) {
1403 if (rt6_check_expired(rt
))
1407 if (!(rt
->rt6i_flags
& RTF_GATEWAY
))
1409 if (fl6
->flowi6_oif
!= rt
->dst
.dev
->ifindex
)
1411 if (!ipv6_addr_equal(&rdfl
->gateway
, &rt
->rt6i_gateway
))
1417 rt
= net
->ipv6
.ip6_null_entry
;
1418 else if (rt
->dst
.error
) {
1419 rt
= net
->ipv6
.ip6_null_entry
;
1423 if (rt
== net
->ipv6
.ip6_null_entry
) {
1424 fn
= fib6_backtrack(fn
, &fl6
->saddr
);
1432 read_unlock_bh(&table
->tb6_lock
);
1437 static struct dst_entry
*ip6_route_redirect(struct net
*net
,
1438 const struct flowi6
*fl6
,
1439 const struct in6_addr
*gateway
)
1441 int flags
= RT6_LOOKUP_F_HAS_SADDR
;
1442 struct ip6rd_flowi rdfl
;
1445 rdfl
.gateway
= *gateway
;
1447 return fib6_rule_lookup(net
, &rdfl
.fl6
,
1448 flags
, __ip6_route_redirect
);
1451 void ip6_redirect(struct sk_buff
*skb
, struct net
*net
, int oif
, u32 mark
)
1453 const struct ipv6hdr
*iph
= (struct ipv6hdr
*) skb
->data
;
1454 struct dst_entry
*dst
;
1457 memset(&fl6
, 0, sizeof(fl6
));
1458 fl6
.flowi6_iif
= LOOPBACK_IFINDEX
;
1459 fl6
.flowi6_oif
= oif
;
1460 fl6
.flowi6_mark
= mark
;
1461 fl6
.daddr
= iph
->daddr
;
1462 fl6
.saddr
= iph
->saddr
;
1463 fl6
.flowlabel
= ip6_flowinfo(iph
);
1465 dst
= ip6_route_redirect(net
, &fl6
, &ipv6_hdr(skb
)->saddr
);
1466 rt6_do_redirect(dst
, NULL
, skb
);
1469 EXPORT_SYMBOL_GPL(ip6_redirect
);
1471 void ip6_redirect_no_header(struct sk_buff
*skb
, struct net
*net
, int oif
,
1474 const struct ipv6hdr
*iph
= ipv6_hdr(skb
);
1475 const struct rd_msg
*msg
= (struct rd_msg
*)icmp6_hdr(skb
);
1476 struct dst_entry
*dst
;
1479 memset(&fl6
, 0, sizeof(fl6
));
1480 fl6
.flowi6_iif
= LOOPBACK_IFINDEX
;
1481 fl6
.flowi6_oif
= oif
;
1482 fl6
.flowi6_mark
= mark
;
1483 fl6
.daddr
= msg
->dest
;
1484 fl6
.saddr
= iph
->daddr
;
1486 dst
= ip6_route_redirect(net
, &fl6
, &iph
->saddr
);
1487 rt6_do_redirect(dst
, NULL
, skb
);
/* Handle an ICMPv6 redirect on behalf of a socket: run the redirect
 * lookup in the socket's own namespace, restricted to its bound
 * interface and carrying its fwmark.
 */
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);
1497 static unsigned int ip6_default_advmss(const struct dst_entry
*dst
)
1499 struct net_device
*dev
= dst
->dev
;
1500 unsigned int mtu
= dst_mtu(dst
);
1501 struct net
*net
= dev_net(dev
);
1503 mtu
-= sizeof(struct ipv6hdr
) + sizeof(struct tcphdr
);
1505 if (mtu
< net
->ipv6
.sysctl
.ip6_rt_min_advmss
)
1506 mtu
= net
->ipv6
.sysctl
.ip6_rt_min_advmss
;
1509 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
1510 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
1511 * IPV6_MAXPLEN is also valid and means: "any MSS,
1512 * rely only on pmtu discovery"
1514 if (mtu
> IPV6_MAXPLEN
- sizeof(struct tcphdr
))
1519 static unsigned int ip6_mtu(const struct dst_entry
*dst
)
1521 const struct rt6_info
*rt
= (const struct rt6_info
*)dst
;
1522 unsigned int mtu
= rt
->rt6i_pmtu
;
1523 struct inet6_dev
*idev
;
1528 mtu
= dst_metric_raw(dst
, RTAX_MTU
);
1535 idev
= __in6_dev_get(dst
->dev
);
1537 mtu
= idev
->cnf
.mtu6
;
1541 return min_t(unsigned int, mtu
, IP6_MAX_MTU
);
/* Singly-linked list (via dst.next) of dst entries handed out by
 * icmp6_dst_alloc(); walked by icmp6_dst_gc() and icmp6_clean_all().
 * All accesses are serialized by icmp6_dst_lock.
 */
static struct dst_entry *icmp6_dst_gc_list;
static DEFINE_SPINLOCK(icmp6_dst_lock);
1547 struct dst_entry
*icmp6_dst_alloc(struct net_device
*dev
,
1550 struct dst_entry
*dst
;
1551 struct rt6_info
*rt
;
1552 struct inet6_dev
*idev
= in6_dev_get(dev
);
1553 struct net
*net
= dev_net(dev
);
1555 if (unlikely(!idev
))
1556 return ERR_PTR(-ENODEV
);
1558 rt
= ip6_dst_alloc(net
, dev
, 0, NULL
);
1559 if (unlikely(!rt
)) {
1561 dst
= ERR_PTR(-ENOMEM
);
1565 rt
->dst
.flags
|= DST_HOST
;
1566 rt
->dst
.output
= ip6_output
;
1567 atomic_set(&rt
->dst
.__refcnt
, 1);
1568 rt
->rt6i_gateway
= fl6
->daddr
;
1569 rt
->rt6i_dst
.addr
= fl6
->daddr
;
1570 rt
->rt6i_dst
.plen
= 128;
1571 rt
->rt6i_idev
= idev
;
1572 dst_metric_set(&rt
->dst
, RTAX_HOPLIMIT
, 0);
1574 spin_lock_bh(&icmp6_dst_lock
);
1575 rt
->dst
.next
= icmp6_dst_gc_list
;
1576 icmp6_dst_gc_list
= &rt
->dst
;
1577 spin_unlock_bh(&icmp6_dst_lock
);
1579 fib6_force_start_gc(net
);
1581 dst
= xfrm_lookup(net
, &rt
->dst
, flowi6_to_flowi(fl6
), NULL
, 0);
1587 int icmp6_dst_gc(void)
1589 struct dst_entry
*dst
, **pprev
;
1592 spin_lock_bh(&icmp6_dst_lock
);
1593 pprev
= &icmp6_dst_gc_list
;
1595 while ((dst
= *pprev
) != NULL
) {
1596 if (!atomic_read(&dst
->__refcnt
)) {
1605 spin_unlock_bh(&icmp6_dst_lock
);
1610 static void icmp6_clean_all(int (*func
)(struct rt6_info
*rt
, void *arg
),
1613 struct dst_entry
*dst
, **pprev
;
1615 spin_lock_bh(&icmp6_dst_lock
);
1616 pprev
= &icmp6_dst_gc_list
;
1617 while ((dst
= *pprev
) != NULL
) {
1618 struct rt6_info
*rt
= (struct rt6_info
*) dst
;
1619 if (func(rt
, arg
)) {
1626 spin_unlock_bh(&icmp6_dst_lock
);
1629 static int ip6_dst_gc(struct dst_ops
*ops
)
1631 struct net
*net
= container_of(ops
, struct net
, ipv6
.ip6_dst_ops
);
1632 int rt_min_interval
= net
->ipv6
.sysctl
.ip6_rt_gc_min_interval
;
1633 int rt_max_size
= net
->ipv6
.sysctl
.ip6_rt_max_size
;
1634 int rt_elasticity
= net
->ipv6
.sysctl
.ip6_rt_gc_elasticity
;
1635 int rt_gc_timeout
= net
->ipv6
.sysctl
.ip6_rt_gc_timeout
;
1636 unsigned long rt_last_gc
= net
->ipv6
.ip6_rt_last_gc
;
1639 entries
= dst_entries_get_fast(ops
);
1640 if (time_after(rt_last_gc
+ rt_min_interval
, jiffies
) &&
1641 entries
<= rt_max_size
)
1644 net
->ipv6
.ip6_rt_gc_expire
++;
1645 fib6_run_gc(net
->ipv6
.ip6_rt_gc_expire
, net
, true);
1646 entries
= dst_entries_get_slow(ops
);
1647 if (entries
< ops
->gc_thresh
)
1648 net
->ipv6
.ip6_rt_gc_expire
= rt_gc_timeout
>>1;
1650 net
->ipv6
.ip6_rt_gc_expire
-= net
->ipv6
.ip6_rt_gc_expire
>>rt_elasticity
;
1651 return entries
> rt_max_size
;
1654 static int ip6_convert_metrics(struct mx6_config
*mxc
,
1655 const struct fib6_config
*cfg
)
1664 mp
= kzalloc(sizeof(u32
) * RTAX_MAX
, GFP_KERNEL
);
1668 nla_for_each_attr(nla
, cfg
->fc_mx
, cfg
->fc_mx_len
, remaining
) {
1669 int type
= nla_type(nla
);
1674 if (unlikely(type
> RTAX_MAX
))
1676 if (type
== RTAX_CC_ALGO
) {
1677 char tmp
[TCP_CA_NAME_MAX
];
1679 nla_strlcpy(tmp
, nla
, sizeof(tmp
));
1680 val
= tcp_ca_get_key_by_name(tmp
);
1681 if (val
== TCP_CA_UNSPEC
)
1684 val
= nla_get_u32(nla
);
1688 __set_bit(type
- 1, mxc
->mx_valid
);
1700 int ip6_route_add(struct fib6_config
*cfg
)
1703 struct net
*net
= cfg
->fc_nlinfo
.nl_net
;
1704 struct rt6_info
*rt
= NULL
;
1705 struct net_device
*dev
= NULL
;
1706 struct inet6_dev
*idev
= NULL
;
1707 struct fib6_table
*table
;
1708 struct mx6_config mxc
= { .mx
= NULL
, };
1711 if (cfg
->fc_dst_len
> 128 || cfg
->fc_src_len
> 128)
1713 #ifndef CONFIG_IPV6_SUBTREES
1714 if (cfg
->fc_src_len
)
1717 if (cfg
->fc_ifindex
) {
1719 dev
= dev_get_by_index(net
, cfg
->fc_ifindex
);
1722 idev
= in6_dev_get(dev
);
1727 if (cfg
->fc_metric
== 0)
1728 cfg
->fc_metric
= IP6_RT_PRIO_USER
;
1731 if (cfg
->fc_nlinfo
.nlh
&&
1732 !(cfg
->fc_nlinfo
.nlh
->nlmsg_flags
& NLM_F_CREATE
)) {
1733 table
= fib6_get_table(net
, cfg
->fc_table
);
1735 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
1736 table
= fib6_new_table(net
, cfg
->fc_table
);
1739 table
= fib6_new_table(net
, cfg
->fc_table
);
1745 rt
= ip6_dst_alloc(net
, NULL
, (cfg
->fc_flags
& RTF_ADDRCONF
) ? 0 : DST_NOCOUNT
, table
);
1752 if (cfg
->fc_flags
& RTF_EXPIRES
)
1753 rt6_set_expires(rt
, jiffies
+
1754 clock_t_to_jiffies(cfg
->fc_expires
));
1756 rt6_clean_expires(rt
);
1758 if (cfg
->fc_protocol
== RTPROT_UNSPEC
)
1759 cfg
->fc_protocol
= RTPROT_BOOT
;
1760 rt
->rt6i_protocol
= cfg
->fc_protocol
;
1762 addr_type
= ipv6_addr_type(&cfg
->fc_dst
);
1764 if (addr_type
& IPV6_ADDR_MULTICAST
)
1765 rt
->dst
.input
= ip6_mc_input
;
1766 else if (cfg
->fc_flags
& RTF_LOCAL
)
1767 rt
->dst
.input
= ip6_input
;
1769 rt
->dst
.input
= ip6_forward
;
1771 rt
->dst
.output
= ip6_output
;
1773 ipv6_addr_prefix(&rt
->rt6i_dst
.addr
, &cfg
->fc_dst
, cfg
->fc_dst_len
);
1774 rt
->rt6i_dst
.plen
= cfg
->fc_dst_len
;
1775 if (rt
->rt6i_dst
.plen
== 128)
1776 rt
->dst
.flags
|= DST_HOST
;
1778 #ifdef CONFIG_IPV6_SUBTREES
1779 ipv6_addr_prefix(&rt
->rt6i_src
.addr
, &cfg
->fc_src
, cfg
->fc_src_len
);
1780 rt
->rt6i_src
.plen
= cfg
->fc_src_len
;
1783 rt
->rt6i_metric
= cfg
->fc_metric
;
1785 /* We cannot add true routes via loopback here,
1786 they would result in kernel looping; promote them to reject routes
1788 if ((cfg
->fc_flags
& RTF_REJECT
) ||
1789 (dev
&& (dev
->flags
& IFF_LOOPBACK
) &&
1790 !(addr_type
& IPV6_ADDR_LOOPBACK
) &&
1791 !(cfg
->fc_flags
& RTF_LOCAL
))) {
1792 /* hold loopback dev/idev if we haven't done so. */
1793 if (dev
!= net
->loopback_dev
) {
1798 dev
= net
->loopback_dev
;
1800 idev
= in6_dev_get(dev
);
1806 rt
->rt6i_flags
= RTF_REJECT
|RTF_NONEXTHOP
;
1807 switch (cfg
->fc_type
) {
1809 rt
->dst
.error
= -EINVAL
;
1810 rt
->dst
.output
= dst_discard_sk
;
1811 rt
->dst
.input
= dst_discard
;
1814 rt
->dst
.error
= -EACCES
;
1815 rt
->dst
.output
= ip6_pkt_prohibit_out
;
1816 rt
->dst
.input
= ip6_pkt_prohibit
;
1820 rt
->dst
.error
= (cfg
->fc_type
== RTN_THROW
) ? -EAGAIN
1822 rt
->dst
.output
= ip6_pkt_discard_out
;
1823 rt
->dst
.input
= ip6_pkt_discard
;
1829 if (cfg
->fc_flags
& RTF_GATEWAY
) {
1830 const struct in6_addr
*gw_addr
;
1833 gw_addr
= &cfg
->fc_gateway
;
1835 /* if gw_addr is local we will fail to detect this in case
1836 * address is still TENTATIVE (DAD in progress). rt6_lookup()
1837 * will return already-added prefix route via interface that
1838 * prefix route was assigned to, which might be non-loopback.
1841 if (ipv6_chk_addr_and_flags(net
, gw_addr
, NULL
, 0, 0))
1844 rt
->rt6i_gateway
= *gw_addr
;
1845 gwa_type
= ipv6_addr_type(gw_addr
);
1847 if (gwa_type
!= (IPV6_ADDR_LINKLOCAL
|IPV6_ADDR_UNICAST
)) {
1848 struct rt6_info
*grt
;
1850 /* IPv6 strictly inhibits using not link-local
1851 addresses as nexthop address.
1852 Otherwise, router will not able to send redirects.
1853 It is very good, but in some (rare!) circumstances
1854 (SIT, PtP, NBMA NOARP links) it is handy to allow
1855 some exceptions. --ANK
1857 if (!(gwa_type
& IPV6_ADDR_UNICAST
))
1860 grt
= rt6_lookup(net
, gw_addr
, NULL
, cfg
->fc_ifindex
, 1);
1862 err
= -EHOSTUNREACH
;
1866 if (dev
!= grt
->dst
.dev
) {
1872 idev
= grt
->rt6i_idev
;
1874 in6_dev_hold(grt
->rt6i_idev
);
1876 if (!(grt
->rt6i_flags
& RTF_GATEWAY
))
1884 if (!dev
|| (dev
->flags
& IFF_LOOPBACK
))
1892 if (!ipv6_addr_any(&cfg
->fc_prefsrc
)) {
1893 if (!ipv6_chk_addr(net
, &cfg
->fc_prefsrc
, dev
, 0)) {
1897 rt
->rt6i_prefsrc
.addr
= cfg
->fc_prefsrc
;
1898 rt
->rt6i_prefsrc
.plen
= 128;
1900 rt
->rt6i_prefsrc
.plen
= 0;
1902 rt
->rt6i_flags
= cfg
->fc_flags
;
1906 rt
->rt6i_idev
= idev
;
1907 rt
->rt6i_table
= table
;
1909 cfg
->fc_nlinfo
.nl_net
= dev_net(dev
);
1911 err
= ip6_convert_metrics(&mxc
, cfg
);
1915 err
= __ip6_ins_rt(rt
, &cfg
->fc_nlinfo
, &mxc
);
1929 static int __ip6_del_rt(struct rt6_info
*rt
, struct nl_info
*info
)
1932 struct fib6_table
*table
;
1933 struct net
*net
= dev_net(rt
->dst
.dev
);
1935 if (rt
== net
->ipv6
.ip6_null_entry
) {
1940 table
= rt
->rt6i_table
;
1941 write_lock_bh(&table
->tb6_lock
);
1942 err
= fib6_del(rt
, info
);
1943 write_unlock_bh(&table
->tb6_lock
);
/* Delete a route with default netlink notification info, using the
 * namespace of the route's own output device.
 */
int ip6_del_rt(struct rt6_info *rt)
{
	struct nl_info info = {
		.nl_net = dev_net(rt->dst.dev),
	};
	return __ip6_del_rt(rt, &info);
}
1958 static int ip6_route_del(struct fib6_config
*cfg
)
1960 struct fib6_table
*table
;
1961 struct fib6_node
*fn
;
1962 struct rt6_info
*rt
;
1965 table
= fib6_get_table(cfg
->fc_nlinfo
.nl_net
, cfg
->fc_table
);
1969 read_lock_bh(&table
->tb6_lock
);
1971 fn
= fib6_locate(&table
->tb6_root
,
1972 &cfg
->fc_dst
, cfg
->fc_dst_len
,
1973 &cfg
->fc_src
, cfg
->fc_src_len
);
1976 for (rt
= fn
->leaf
; rt
; rt
= rt
->dst
.rt6_next
) {
1977 if ((rt
->rt6i_flags
& RTF_CACHE
) &&
1978 !(cfg
->fc_flags
& RTF_CACHE
))
1980 if (cfg
->fc_ifindex
&&
1982 rt
->dst
.dev
->ifindex
!= cfg
->fc_ifindex
))
1984 if (cfg
->fc_flags
& RTF_GATEWAY
&&
1985 !ipv6_addr_equal(&cfg
->fc_gateway
, &rt
->rt6i_gateway
))
1987 if (cfg
->fc_metric
&& cfg
->fc_metric
!= rt
->rt6i_metric
)
1990 read_unlock_bh(&table
->tb6_lock
);
1992 return __ip6_del_rt(rt
, &cfg
->fc_nlinfo
);
1995 read_unlock_bh(&table
->tb6_lock
);
2000 static void rt6_do_redirect(struct dst_entry
*dst
, struct sock
*sk
, struct sk_buff
*skb
)
2002 struct net
*net
= dev_net(skb
->dev
);
2003 struct netevent_redirect netevent
;
2004 struct rt6_info
*rt
, *nrt
= NULL
;
2005 struct ndisc_options ndopts
;
2006 struct inet6_dev
*in6_dev
;
2007 struct neighbour
*neigh
;
2009 int optlen
, on_link
;
2012 optlen
= skb_tail_pointer(skb
) - skb_transport_header(skb
);
2013 optlen
-= sizeof(*msg
);
2016 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
2020 msg
= (struct rd_msg
*)icmp6_hdr(skb
);
2022 if (ipv6_addr_is_multicast(&msg
->dest
)) {
2023 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
2028 if (ipv6_addr_equal(&msg
->dest
, &msg
->target
)) {
2030 } else if (ipv6_addr_type(&msg
->target
) !=
2031 (IPV6_ADDR_UNICAST
|IPV6_ADDR_LINKLOCAL
)) {
2032 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
2036 in6_dev
= __in6_dev_get(skb
->dev
);
2039 if (in6_dev
->cnf
.forwarding
|| !in6_dev
->cnf
.accept_redirects
)
2043 * The IP source address of the Redirect MUST be the same as the current
2044 * first-hop router for the specified ICMP Destination Address.
2047 if (!ndisc_parse_options(msg
->opt
, optlen
, &ndopts
)) {
2048 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
2053 if (ndopts
.nd_opts_tgt_lladdr
) {
2054 lladdr
= ndisc_opt_addr_data(ndopts
.nd_opts_tgt_lladdr
,
2057 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
2062 rt
= (struct rt6_info
*) dst
;
2063 if (rt
== net
->ipv6
.ip6_null_entry
) {
2064 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
2068 /* Redirect received -> path was valid.
2069 * Look, redirects are sent only in response to data packets,
2070 * so that this nexthop apparently is reachable. --ANK
2072 dst_confirm(&rt
->dst
);
2074 neigh
= __neigh_lookup(&nd_tbl
, &msg
->target
, skb
->dev
, 1);
2079 * We have finally decided to accept it.
2082 neigh_update(neigh
, lladdr
, NUD_STALE
,
2083 NEIGH_UPDATE_F_WEAK_OVERRIDE
|
2084 NEIGH_UPDATE_F_OVERRIDE
|
2085 (on_link
? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER
|
2086 NEIGH_UPDATE_F_ISROUTER
))
2089 nrt
= ip6_rt_cache_alloc(rt
, &msg
->dest
, NULL
);
2093 nrt
->rt6i_flags
= RTF_GATEWAY
|RTF_UP
|RTF_DYNAMIC
|RTF_CACHE
;
2095 nrt
->rt6i_flags
&= ~RTF_GATEWAY
;
2097 nrt
->rt6i_gateway
= *(struct in6_addr
*)neigh
->primary_key
;
2099 if (ip6_ins_rt(nrt
))
2102 netevent
.old
= &rt
->dst
;
2103 netevent
.new = &nrt
->dst
;
2104 netevent
.daddr
= &msg
->dest
;
2105 netevent
.neigh
= neigh
;
2106 call_netevent_notifiers(NETEVENT_REDIRECT
, &netevent
);
2108 if (rt
->rt6i_flags
& RTF_CACHE
) {
2109 rt
= (struct rt6_info
*) dst_clone(&rt
->dst
);
2114 neigh_release(neigh
);
2118 * Misc support functions
/* Link @rt to the route @from it was derived from.
 *
 * Takes a reference on @from's dst, points rt->dst.from at it, and
 * shares @from's metrics read-only via dst_init_metrics().  @from must
 * not itself be derived from another route (BUG_ON otherwise).  Also
 * clears RTF_EXPIRES on @rt.
 */
static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
{
	BUG_ON(from->dst.from);

	rt->rt6i_flags &= ~RTF_EXPIRES;
	dst_hold(&from->dst);
	rt->dst.from = &from->dst;
	dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
}
2131 static void ip6_rt_copy_init(struct rt6_info
*rt
, struct rt6_info
*ort
)
2133 rt
->dst
.input
= ort
->dst
.input
;
2134 rt
->dst
.output
= ort
->dst
.output
;
2135 rt
->rt6i_dst
= ort
->rt6i_dst
;
2136 rt
->dst
.error
= ort
->dst
.error
;
2137 rt
->rt6i_idev
= ort
->rt6i_idev
;
2139 in6_dev_hold(rt
->rt6i_idev
);
2140 rt
->dst
.lastuse
= jiffies
;
2141 rt
->rt6i_gateway
= ort
->rt6i_gateway
;
2142 rt
->rt6i_flags
= ort
->rt6i_flags
;
2143 rt6_set_from(rt
, ort
);
2144 rt
->rt6i_metric
= ort
->rt6i_metric
;
2145 #ifdef CONFIG_IPV6_SUBTREES
2146 rt
->rt6i_src
= ort
->rt6i_src
;
2148 rt
->rt6i_prefsrc
= ort
->rt6i_prefsrc
;
2149 rt
->rt6i_table
= ort
->rt6i_table
;
2152 #ifdef CONFIG_IPV6_ROUTE_INFO
2153 static struct rt6_info
*rt6_get_route_info(struct net
*net
,
2154 const struct in6_addr
*prefix
, int prefixlen
,
2155 const struct in6_addr
*gwaddr
, int ifindex
)
2157 struct fib6_node
*fn
;
2158 struct rt6_info
*rt
= NULL
;
2159 struct fib6_table
*table
;
2161 table
= fib6_get_table(net
, RT6_TABLE_INFO
);
2165 read_lock_bh(&table
->tb6_lock
);
2166 fn
= fib6_locate(&table
->tb6_root
, prefix
, prefixlen
, NULL
, 0);
2170 for (rt
= fn
->leaf
; rt
; rt
= rt
->dst
.rt6_next
) {
2171 if (rt
->dst
.dev
->ifindex
!= ifindex
)
2173 if ((rt
->rt6i_flags
& (RTF_ROUTEINFO
|RTF_GATEWAY
)) != (RTF_ROUTEINFO
|RTF_GATEWAY
))
2175 if (!ipv6_addr_equal(&rt
->rt6i_gateway
, gwaddr
))
2181 read_unlock_bh(&table
->tb6_lock
);
2185 static struct rt6_info
*rt6_add_route_info(struct net
*net
,
2186 const struct in6_addr
*prefix
, int prefixlen
,
2187 const struct in6_addr
*gwaddr
, int ifindex
,
2190 struct fib6_config cfg
= {
2191 .fc_table
= RT6_TABLE_INFO
,
2192 .fc_metric
= IP6_RT_PRIO_USER
,
2193 .fc_ifindex
= ifindex
,
2194 .fc_dst_len
= prefixlen
,
2195 .fc_flags
= RTF_GATEWAY
| RTF_ADDRCONF
| RTF_ROUTEINFO
|
2196 RTF_UP
| RTF_PREF(pref
),
2197 .fc_nlinfo
.portid
= 0,
2198 .fc_nlinfo
.nlh
= NULL
,
2199 .fc_nlinfo
.nl_net
= net
,
2202 cfg
.fc_dst
= *prefix
;
2203 cfg
.fc_gateway
= *gwaddr
;
2205 /* We should treat it as a default route if prefix length is 0. */
2207 cfg
.fc_flags
|= RTF_DEFAULT
;
2209 ip6_route_add(&cfg
);
2211 return rt6_get_route_info(net
, prefix
, prefixlen
, gwaddr
, ifindex
);
2215 struct rt6_info
*rt6_get_dflt_router(const struct in6_addr
*addr
, struct net_device
*dev
)
2217 struct rt6_info
*rt
;
2218 struct fib6_table
*table
;
2220 table
= fib6_get_table(dev_net(dev
), RT6_TABLE_DFLT
);
2224 read_lock_bh(&table
->tb6_lock
);
2225 for (rt
= table
->tb6_root
.leaf
; rt
; rt
= rt
->dst
.rt6_next
) {
2226 if (dev
== rt
->dst
.dev
&&
2227 ((rt
->rt6i_flags
& (RTF_ADDRCONF
| RTF_DEFAULT
)) == (RTF_ADDRCONF
| RTF_DEFAULT
)) &&
2228 ipv6_addr_equal(&rt
->rt6i_gateway
, addr
))
2233 read_unlock_bh(&table
->tb6_lock
);
2237 struct rt6_info
*rt6_add_dflt_router(const struct in6_addr
*gwaddr
,
2238 struct net_device
*dev
,
2241 struct fib6_config cfg
= {
2242 .fc_table
= RT6_TABLE_DFLT
,
2243 .fc_metric
= IP6_RT_PRIO_USER
,
2244 .fc_ifindex
= dev
->ifindex
,
2245 .fc_flags
= RTF_GATEWAY
| RTF_ADDRCONF
| RTF_DEFAULT
|
2246 RTF_UP
| RTF_EXPIRES
| RTF_PREF(pref
),
2247 .fc_nlinfo
.portid
= 0,
2248 .fc_nlinfo
.nlh
= NULL
,
2249 .fc_nlinfo
.nl_net
= dev_net(dev
),
2252 cfg
.fc_gateway
= *gwaddr
;
2254 ip6_route_add(&cfg
);
2256 return rt6_get_dflt_router(gwaddr
, dev
);
2259 void rt6_purge_dflt_routers(struct net
*net
)
2261 struct rt6_info
*rt
;
2262 struct fib6_table
*table
;
2264 /* NOTE: Keep consistent with rt6_get_dflt_router */
2265 table
= fib6_get_table(net
, RT6_TABLE_DFLT
);
2270 read_lock_bh(&table
->tb6_lock
);
2271 for (rt
= table
->tb6_root
.leaf
; rt
; rt
= rt
->dst
.rt6_next
) {
2272 if (rt
->rt6i_flags
& (RTF_DEFAULT
| RTF_ADDRCONF
) &&
2273 (!rt
->rt6i_idev
|| rt
->rt6i_idev
->cnf
.accept_ra
!= 2)) {
2275 read_unlock_bh(&table
->tb6_lock
);
2280 read_unlock_bh(&table
->tb6_lock
);
/* Translate a legacy ioctl route request (struct in6_rtmsg) into the
 * common struct fib6_config consumed by ip6_route_add()/ip6_route_del().
 * Zeroes @cfg first, so unset fields default to 0.
 */
static void rtmsg_to_fib6_config(struct net *net,
				 struct in6_rtmsg *rtmsg,
				 struct fib6_config *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	/* ioctl-created routes always go into the main table */
	cfg->fc_table = RT6_TABLE_MAIN;
	cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
	cfg->fc_metric = rtmsg->rtmsg_metric;
	cfg->fc_expires = rtmsg->rtmsg_info;
	cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
	cfg->fc_src_len = rtmsg->rtmsg_src_len;
	cfg->fc_flags = rtmsg->rtmsg_flags;

	cfg->fc_nlinfo.nl_net = net;

	cfg->fc_dst = rtmsg->rtmsg_dst;
	cfg->fc_src = rtmsg->rtmsg_src;
	cfg->fc_gateway = rtmsg->rtmsg_gateway;
}
2304 int ipv6_route_ioctl(struct net
*net
, unsigned int cmd
, void __user
*arg
)
2306 struct fib6_config cfg
;
2307 struct in6_rtmsg rtmsg
;
2311 case SIOCADDRT
: /* Add a route */
2312 case SIOCDELRT
: /* Delete a route */
2313 if (!ns_capable(net
->user_ns
, CAP_NET_ADMIN
))
2315 err
= copy_from_user(&rtmsg
, arg
,
2316 sizeof(struct in6_rtmsg
));
2320 rtmsg_to_fib6_config(net
, &rtmsg
, &cfg
);
2325 err
= ip6_route_add(&cfg
);
2328 err
= ip6_route_del(&cfg
);
2342 * Drop the packet on the floor
2345 static int ip6_pkt_drop(struct sk_buff
*skb
, u8 code
, int ipstats_mib_noroutes
)
2348 struct dst_entry
*dst
= skb_dst(skb
);
2349 switch (ipstats_mib_noroutes
) {
2350 case IPSTATS_MIB_INNOROUTES
:
2351 type
= ipv6_addr_type(&ipv6_hdr(skb
)->daddr
);
2352 if (type
== IPV6_ADDR_ANY
) {
2353 IP6_INC_STATS(dev_net(dst
->dev
), ip6_dst_idev(dst
),
2354 IPSTATS_MIB_INADDRERRORS
);
2358 case IPSTATS_MIB_OUTNOROUTES
:
2359 IP6_INC_STATS(dev_net(dst
->dev
), ip6_dst_idev(dst
),
2360 ipstats_mib_noroutes
);
2363 icmpv6_send(skb
, ICMPV6_DEST_UNREACH
, code
, 0);
/* Input handler for blackhole/unreachable routes: drop the packet and
 * report ICMPv6 "no route", counting it against the input statistics.
 */
static int ip6_pkt_discard(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
}
/* Output-path counterpart of ip6_pkt_discard(): restore skb->dev from
 * the attached dst, then drop with output-side accounting.
 */
static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}
/* Input handler for prohibit routes: drop the packet and report ICMPv6
 * "administratively prohibited", counting against input statistics.
 */
static int ip6_pkt_prohibit(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
}
/* Output-path counterpart of ip6_pkt_prohibit(): restore skb->dev from
 * the attached dst, then drop with output-side accounting.
 */
static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}
2391 * Allocate a dst for local (unicast / anycast) address.
2394 struct rt6_info
*addrconf_dst_alloc(struct inet6_dev
*idev
,
2395 const struct in6_addr
*addr
,
2398 struct net
*net
= dev_net(idev
->dev
);
2399 struct rt6_info
*rt
= ip6_dst_alloc(net
, net
->loopback_dev
,
2402 return ERR_PTR(-ENOMEM
);
2406 rt
->dst
.flags
|= DST_HOST
;
2407 rt
->dst
.input
= ip6_input
;
2408 rt
->dst
.output
= ip6_output
;
2409 rt
->rt6i_idev
= idev
;
2411 rt
->rt6i_flags
= RTF_UP
| RTF_NONEXTHOP
;
2413 rt
->rt6i_flags
|= RTF_ANYCAST
;
2415 rt
->rt6i_flags
|= RTF_LOCAL
;
2417 rt
->rt6i_gateway
= *addr
;
2418 rt
->rt6i_dst
.addr
= *addr
;
2419 rt
->rt6i_dst
.plen
= 128;
2420 rt
->rt6i_table
= fib6_get_table(net
, RT6_TABLE_LOCAL
);
2422 atomic_set(&rt
->dst
.__refcnt
, 1);
2427 int ip6_route_get_saddr(struct net
*net
,
2428 struct rt6_info
*rt
,
2429 const struct in6_addr
*daddr
,
2431 struct in6_addr
*saddr
)
2433 struct inet6_dev
*idev
=
2434 rt
? ip6_dst_idev((struct dst_entry
*)rt
) : NULL
;
2436 if (rt
&& rt
->rt6i_prefsrc
.plen
)
2437 *saddr
= rt
->rt6i_prefsrc
.addr
;
2439 err
= ipv6_dev_get_saddr(net
, idev
? idev
->dev
: NULL
,
2440 daddr
, prefs
, saddr
);
2444 /* remove deleted ip from prefsrc entries */
2445 struct arg_dev_net_ip
{
2446 struct net_device
*dev
;
2448 struct in6_addr
*addr
;
2451 static int fib6_remove_prefsrc(struct rt6_info
*rt
, void *arg
)
2453 struct net_device
*dev
= ((struct arg_dev_net_ip
*)arg
)->dev
;
2454 struct net
*net
= ((struct arg_dev_net_ip
*)arg
)->net
;
2455 struct in6_addr
*addr
= ((struct arg_dev_net_ip
*)arg
)->addr
;
2457 if (((void *)rt
->dst
.dev
== dev
|| !dev
) &&
2458 rt
!= net
->ipv6
.ip6_null_entry
&&
2459 ipv6_addr_equal(addr
, &rt
->rt6i_prefsrc
.addr
)) {
2460 /* remove prefsrc entry */
2461 rt
->rt6i_prefsrc
.plen
= 0;
2466 void rt6_remove_prefsrc(struct inet6_ifaddr
*ifp
)
2468 struct net
*net
= dev_net(ifp
->idev
->dev
);
2469 struct arg_dev_net_ip adni
= {
2470 .dev
= ifp
->idev
->dev
,
2474 fib6_clean_all(net
, fib6_remove_prefsrc
, &adni
);
/* Flag combinations used by fib6_clean_tohost() to recognize
 * RA-installed default-router routes and gatewayed cache entries.
 */
#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)
2480 /* Remove routers and update dst entries when gateway turns into a host. */
2481 static int fib6_clean_tohost(struct rt6_info
*rt
, void *arg
)
2483 struct in6_addr
*gateway
= (struct in6_addr
*)arg
;
2485 if ((((rt
->rt6i_flags
& RTF_RA_ROUTER
) == RTF_RA_ROUTER
) ||
2486 ((rt
->rt6i_flags
& RTF_CACHE_GATEWAY
) == RTF_CACHE_GATEWAY
)) &&
2487 ipv6_addr_equal(gateway
, &rt
->rt6i_gateway
)) {
/* Walk every FIB table in @net and apply fib6_clean_tohost() to each
 * route, matching routes whose next hop equals @gateway.
 */
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
{
	fib6_clean_all(net, fib6_clean_tohost, gateway);
}
2498 struct arg_dev_net
{
2499 struct net_device
*dev
;
2503 static int fib6_ifdown(struct rt6_info
*rt
, void *arg
)
2505 const struct arg_dev_net
*adn
= arg
;
2506 const struct net_device
*dev
= adn
->dev
;
2508 if ((rt
->dst
.dev
== dev
|| !dev
) &&
2509 rt
!= adn
->net
->ipv6
.ip6_null_entry
)
2515 void rt6_ifdown(struct net
*net
, struct net_device
*dev
)
2517 struct arg_dev_net adn
= {
2522 fib6_clean_all(net
, fib6_ifdown
, &adn
);
2523 icmp6_clean_all(fib6_ifdown
, &adn
);
2524 rt6_uncached_list_flush_dev(net
, dev
);
2527 struct rt6_mtu_change_arg
{
2528 struct net_device
*dev
;
2532 static int rt6_mtu_change_route(struct rt6_info
*rt
, void *p_arg
)
2534 struct rt6_mtu_change_arg
*arg
= (struct rt6_mtu_change_arg
*) p_arg
;
2535 struct inet6_dev
*idev
;
2537 /* In IPv6 pmtu discovery is not optional,
2538 so that RTAX_MTU lock cannot disable it.
2539 We still use this lock to block changes
2540 caused by addrconf/ndisc.
2543 idev
= __in6_dev_get(arg
->dev
);
2547 /* For administrative MTU increase, there is no way to discover
2548 IPv6 PMTU increase, so PMTU increase should be updated here.
2549 Since RFC 1981 doesn't include administrative MTU increase
2550 update PMTU increase is a MUST. (i.e. jumbo frame)
2553 If new MTU is less than route PMTU, this new MTU will be the
2554 lowest MTU in the path, update the route PMTU to reflect PMTU
2555 decreases; if new MTU is greater than route PMTU, and the
2556 old MTU is the lowest MTU in the path, update the route PMTU
2557 to reflect the increase. In this case if the other nodes' MTU
2558 also have the lowest MTU, TOO BIG MESSAGE will be lead to
2561 if (rt
->dst
.dev
== arg
->dev
&&
2562 !dst_metric_locked(&rt
->dst
, RTAX_MTU
)) {
2563 if (rt
->rt6i_flags
& RTF_CACHE
) {
2564 /* For RTF_CACHE with rt6i_pmtu == 0
2565 * (i.e. a redirected route),
2566 * the metrics of its rt->dst.from has already
2569 if (rt
->rt6i_pmtu
&& rt
->rt6i_pmtu
> arg
->mtu
)
2570 rt
->rt6i_pmtu
= arg
->mtu
;
2571 } else if (dst_mtu(&rt
->dst
) >= arg
->mtu
||
2572 (dst_mtu(&rt
->dst
) < arg
->mtu
&&
2573 dst_mtu(&rt
->dst
) == idev
->cnf
.mtu6
)) {
2574 dst_metric_set(&rt
->dst
, RTAX_MTU
, arg
->mtu
);
2580 void rt6_mtu_change(struct net_device
*dev
, unsigned int mtu
)
2582 struct rt6_mtu_change_arg arg
= {
2587 fib6_clean_all(dev_net(dev
), rt6_mtu_change_route
, &arg
);
/* Netlink attribute validation policy for RTM_NEWROUTE/RTM_DELROUTE
 * requests; enforced by nlmsg_parse() in rtm_to_fib6_config().
 * Attributes not listed here are accepted without type/length checks.
 */
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_GATEWAY]		= { .len = sizeof(struct in6_addr) },
	[RTA_OIF]		= { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_PRIORITY]		= { .type = NLA_U32 },
	[RTA_METRICS]		= { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_PREF]		= { .type = NLA_U8 },
};
2600 static int rtm_to_fib6_config(struct sk_buff
*skb
, struct nlmsghdr
*nlh
,
2601 struct fib6_config
*cfg
)
2604 struct nlattr
*tb
[RTA_MAX
+1];
2608 err
= nlmsg_parse(nlh
, sizeof(*rtm
), tb
, RTA_MAX
, rtm_ipv6_policy
);
2613 rtm
= nlmsg_data(nlh
);
2614 memset(cfg
, 0, sizeof(*cfg
));
2616 cfg
->fc_table
= rtm
->rtm_table
;
2617 cfg
->fc_dst_len
= rtm
->rtm_dst_len
;
2618 cfg
->fc_src_len
= rtm
->rtm_src_len
;
2619 cfg
->fc_flags
= RTF_UP
;
2620 cfg
->fc_protocol
= rtm
->rtm_protocol
;
2621 cfg
->fc_type
= rtm
->rtm_type
;
2623 if (rtm
->rtm_type
== RTN_UNREACHABLE
||
2624 rtm
->rtm_type
== RTN_BLACKHOLE
||
2625 rtm
->rtm_type
== RTN_PROHIBIT
||
2626 rtm
->rtm_type
== RTN_THROW
)
2627 cfg
->fc_flags
|= RTF_REJECT
;
2629 if (rtm
->rtm_type
== RTN_LOCAL
)
2630 cfg
->fc_flags
|= RTF_LOCAL
;
2632 if (rtm
->rtm_flags
& RTM_F_CLONED
)
2633 cfg
->fc_flags
|= RTF_CACHE
;
2635 cfg
->fc_nlinfo
.portid
= NETLINK_CB(skb
).portid
;
2636 cfg
->fc_nlinfo
.nlh
= nlh
;
2637 cfg
->fc_nlinfo
.nl_net
= sock_net(skb
->sk
);
2639 if (tb
[RTA_GATEWAY
]) {
2640 cfg
->fc_gateway
= nla_get_in6_addr(tb
[RTA_GATEWAY
]);
2641 cfg
->fc_flags
|= RTF_GATEWAY
;
2645 int plen
= (rtm
->rtm_dst_len
+ 7) >> 3;
2647 if (nla_len(tb
[RTA_DST
]) < plen
)
2650 nla_memcpy(&cfg
->fc_dst
, tb
[RTA_DST
], plen
);
2654 int plen
= (rtm
->rtm_src_len
+ 7) >> 3;
2656 if (nla_len(tb
[RTA_SRC
]) < plen
)
2659 nla_memcpy(&cfg
->fc_src
, tb
[RTA_SRC
], plen
);
2662 if (tb
[RTA_PREFSRC
])
2663 cfg
->fc_prefsrc
= nla_get_in6_addr(tb
[RTA_PREFSRC
]);
2666 cfg
->fc_ifindex
= nla_get_u32(tb
[RTA_OIF
]);
2668 if (tb
[RTA_PRIORITY
])
2669 cfg
->fc_metric
= nla_get_u32(tb
[RTA_PRIORITY
]);
2671 if (tb
[RTA_METRICS
]) {
2672 cfg
->fc_mx
= nla_data(tb
[RTA_METRICS
]);
2673 cfg
->fc_mx_len
= nla_len(tb
[RTA_METRICS
]);
2677 cfg
->fc_table
= nla_get_u32(tb
[RTA_TABLE
]);
2679 if (tb
[RTA_MULTIPATH
]) {
2680 cfg
->fc_mp
= nla_data(tb
[RTA_MULTIPATH
]);
2681 cfg
->fc_mp_len
= nla_len(tb
[RTA_MULTIPATH
]);
2685 pref
= nla_get_u8(tb
[RTA_PREF
]);
2686 if (pref
!= ICMPV6_ROUTER_PREF_LOW
&&
2687 pref
!= ICMPV6_ROUTER_PREF_HIGH
)
2688 pref
= ICMPV6_ROUTER_PREF_MEDIUM
;
2689 cfg
->fc_flags
|= RTF_PREF(pref
);
2697 static int ip6_route_multipath(struct fib6_config
*cfg
, int add
)
2699 struct fib6_config r_cfg
;
2700 struct rtnexthop
*rtnh
;
2703 int err
= 0, last_err
= 0;
2705 remaining
= cfg
->fc_mp_len
;
2707 rtnh
= (struct rtnexthop
*)cfg
->fc_mp
;
2709 /* Parse a Multipath Entry */
2710 while (rtnh_ok(rtnh
, remaining
)) {
2711 memcpy(&r_cfg
, cfg
, sizeof(*cfg
));
2712 if (rtnh
->rtnh_ifindex
)
2713 r_cfg
.fc_ifindex
= rtnh
->rtnh_ifindex
;
2715 attrlen
= rtnh_attrlen(rtnh
);
2717 struct nlattr
*nla
, *attrs
= rtnh_attrs(rtnh
);
2719 nla
= nla_find(attrs
, attrlen
, RTA_GATEWAY
);
2721 r_cfg
.fc_gateway
= nla_get_in6_addr(nla
);
2722 r_cfg
.fc_flags
|= RTF_GATEWAY
;
2725 err
= add
? ip6_route_add(&r_cfg
) : ip6_route_del(&r_cfg
);
2728 /* If we are trying to remove a route, do not stop the
2729 * loop when ip6_route_del() fails (because next hop is
2730 * already gone), we should try to remove all next hops.
2733 /* If add fails, we should try to delete all
2734 * next hops that have been already added.
2737 remaining
= cfg
->fc_mp_len
- remaining
;
2741 /* Because each route is added like a single route we remove
2742 * these flags after the first nexthop: if there is a collision,
2743 * we have already failed to add the first nexthop:
2744 * fib6_add_rt2node() has rejected it; when replacing, old
2745 * nexthops have been replaced by first new, the rest should
2748 cfg
->fc_nlinfo
.nlh
->nlmsg_flags
&= ~(NLM_F_EXCL
|
2750 rtnh
= rtnh_next(rtnh
, &remaining
);
2756 static int inet6_rtm_delroute(struct sk_buff
*skb
, struct nlmsghdr
*nlh
)
2758 struct fib6_config cfg
;
2761 err
= rtm_to_fib6_config(skb
, nlh
, &cfg
);
2766 return ip6_route_multipath(&cfg
, 0);
2768 return ip6_route_del(&cfg
);
2771 static int inet6_rtm_newroute(struct sk_buff
*skb
, struct nlmsghdr
*nlh
)
2773 struct fib6_config cfg
;
2776 err
= rtm_to_fib6_config(skb
, nlh
, &cfg
);
2781 return ip6_route_multipath(&cfg
, 1);
2783 return ip6_route_add(&cfg
);
/* Upper bound on the netlink message size needed to describe a single
 * IPv6 route (header plus every attribute rt6_fill_node() may emit).
 */
static inline size_t rt6_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtmsg))
	       + nla_total_size(16) /* RTA_SRC */
	       + nla_total_size(16) /* RTA_DST */
	       + nla_total_size(16) /* RTA_GATEWAY */
	       + nla_total_size(16) /* RTA_PREFSRC */
	       + nla_total_size(4) /* RTA_TABLE */
	       + nla_total_size(4) /* RTA_IIF */
	       + nla_total_size(4) /* RTA_OIF */
	       + nla_total_size(4) /* RTA_PRIORITY */
	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
	       + nla_total_size(sizeof(struct rta_cacheinfo))
	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
	       + nla_total_size(1); /* RTA_PREF */
}
2803 static int rt6_fill_node(struct net
*net
,
2804 struct sk_buff
*skb
, struct rt6_info
*rt
,
2805 struct in6_addr
*dst
, struct in6_addr
*src
,
2806 int iif
, int type
, u32 portid
, u32 seq
,
2807 int prefix
, int nowait
, unsigned int flags
)
2809 u32 metrics
[RTAX_MAX
];
2811 struct nlmsghdr
*nlh
;
2815 if (prefix
) { /* user wants prefix routes only */
2816 if (!(rt
->rt6i_flags
& RTF_PREFIX_RT
)) {
2817 /* success since this is not a prefix route */
2822 nlh
= nlmsg_put(skb
, portid
, seq
, type
, sizeof(*rtm
), flags
);
2826 rtm
= nlmsg_data(nlh
);
2827 rtm
->rtm_family
= AF_INET6
;
2828 rtm
->rtm_dst_len
= rt
->rt6i_dst
.plen
;
2829 rtm
->rtm_src_len
= rt
->rt6i_src
.plen
;
2832 table
= rt
->rt6i_table
->tb6_id
;
2834 table
= RT6_TABLE_UNSPEC
;
2835 rtm
->rtm_table
= table
;
2836 if (nla_put_u32(skb
, RTA_TABLE
, table
))
2837 goto nla_put_failure
;
2838 if (rt
->rt6i_flags
& RTF_REJECT
) {
2839 switch (rt
->dst
.error
) {
2841 rtm
->rtm_type
= RTN_BLACKHOLE
;
2844 rtm
->rtm_type
= RTN_PROHIBIT
;
2847 rtm
->rtm_type
= RTN_THROW
;
2850 rtm
->rtm_type
= RTN_UNREACHABLE
;
2854 else if (rt
->rt6i_flags
& RTF_LOCAL
)
2855 rtm
->rtm_type
= RTN_LOCAL
;
2856 else if (rt
->dst
.dev
&& (rt
->dst
.dev
->flags
& IFF_LOOPBACK
))
2857 rtm
->rtm_type
= RTN_LOCAL
;
2859 rtm
->rtm_type
= RTN_UNICAST
;
2861 rtm
->rtm_scope
= RT_SCOPE_UNIVERSE
;
2862 rtm
->rtm_protocol
= rt
->rt6i_protocol
;
2863 if (rt
->rt6i_flags
& RTF_DYNAMIC
)
2864 rtm
->rtm_protocol
= RTPROT_REDIRECT
;
2865 else if (rt
->rt6i_flags
& RTF_ADDRCONF
) {
2866 if (rt
->rt6i_flags
& (RTF_DEFAULT
| RTF_ROUTEINFO
))
2867 rtm
->rtm_protocol
= RTPROT_RA
;
2869 rtm
->rtm_protocol
= RTPROT_KERNEL
;
2872 if (rt
->rt6i_flags
& RTF_CACHE
)
2873 rtm
->rtm_flags
|= RTM_F_CLONED
;
2876 if (nla_put_in6_addr(skb
, RTA_DST
, dst
))
2877 goto nla_put_failure
;
2878 rtm
->rtm_dst_len
= 128;
2879 } else if (rtm
->rtm_dst_len
)
2880 if (nla_put_in6_addr(skb
, RTA_DST
, &rt
->rt6i_dst
.addr
))
2881 goto nla_put_failure
;
2882 #ifdef CONFIG_IPV6_SUBTREES
2884 if (nla_put_in6_addr(skb
, RTA_SRC
, src
))
2885 goto nla_put_failure
;
2886 rtm
->rtm_src_len
= 128;
2887 } else if (rtm
->rtm_src_len
&&
2888 nla_put_in6_addr(skb
, RTA_SRC
, &rt
->rt6i_src
.addr
))
2889 goto nla_put_failure
;
2892 #ifdef CONFIG_IPV6_MROUTE
2893 if (ipv6_addr_is_multicast(&rt
->rt6i_dst
.addr
)) {
2894 int err
= ip6mr_get_route(net
, skb
, rtm
, nowait
);
2899 goto nla_put_failure
;
2901 if (err
== -EMSGSIZE
)
2902 goto nla_put_failure
;
2907 if (nla_put_u32(skb
, RTA_IIF
, iif
))
2908 goto nla_put_failure
;
2910 struct in6_addr saddr_buf
;
2911 if (ip6_route_get_saddr(net
, rt
, dst
, 0, &saddr_buf
) == 0 &&
2912 nla_put_in6_addr(skb
, RTA_PREFSRC
, &saddr_buf
))
2913 goto nla_put_failure
;
2916 if (rt
->rt6i_prefsrc
.plen
) {
2917 struct in6_addr saddr_buf
;
2918 saddr_buf
= rt
->rt6i_prefsrc
.addr
;
2919 if (nla_put_in6_addr(skb
, RTA_PREFSRC
, &saddr_buf
))
2920 goto nla_put_failure
;
2923 memcpy(metrics
, dst_metrics_ptr(&rt
->dst
), sizeof(metrics
));
2925 metrics
[RTAX_MTU
- 1] = rt
->rt6i_pmtu
;
2926 if (rtnetlink_put_metrics(skb
, metrics
) < 0)
2927 goto nla_put_failure
;
2929 if (rt
->rt6i_flags
& RTF_GATEWAY
) {
2930 if (nla_put_in6_addr(skb
, RTA_GATEWAY
, &rt
->rt6i_gateway
) < 0)
2931 goto nla_put_failure
;
2935 nla_put_u32(skb
, RTA_OIF
, rt
->dst
.dev
->ifindex
))
2936 goto nla_put_failure
;
2937 if (nla_put_u32(skb
, RTA_PRIORITY
, rt
->rt6i_metric
))
2938 goto nla_put_failure
;
2940 expires
= (rt
->rt6i_flags
& RTF_EXPIRES
) ? rt
->dst
.expires
- jiffies
: 0;
2942 if (rtnl_put_cacheinfo(skb
, &rt
->dst
, 0, expires
, rt
->dst
.error
) < 0)
2943 goto nla_put_failure
;
2945 if (nla_put_u8(skb
, RTA_PREF
, IPV6_EXTRACT_PREF(rt
->rt6i_flags
)))
2946 goto nla_put_failure
;
2948 nlmsg_end(skb
, nlh
);
2952 nlmsg_cancel(skb
, nlh
);
2956 int rt6_dump_route(struct rt6_info
*rt
, void *p_arg
)
2958 struct rt6_rtnl_dump_arg
*arg
= (struct rt6_rtnl_dump_arg
*) p_arg
;
2961 if (nlmsg_len(arg
->cb
->nlh
) >= sizeof(struct rtmsg
)) {
2962 struct rtmsg
*rtm
= nlmsg_data(arg
->cb
->nlh
);
2963 prefix
= (rtm
->rtm_flags
& RTM_F_PREFIX
) != 0;
2967 return rt6_fill_node(arg
->net
,
2968 arg
->skb
, rt
, NULL
, NULL
, 0, RTM_NEWROUTE
,
2969 NETLINK_CB(arg
->cb
->skb
).portid
, arg
->cb
->nlh
->nlmsg_seq
,
2970 prefix
, 0, NLM_F_MULTI
);
2973 static int inet6_rtm_getroute(struct sk_buff
*in_skb
, struct nlmsghdr
*nlh
)
2975 struct net
*net
= sock_net(in_skb
->sk
);
2976 struct nlattr
*tb
[RTA_MAX
+1];
2977 struct rt6_info
*rt
;
2978 struct sk_buff
*skb
;
2981 int err
, iif
= 0, oif
= 0;
2983 err
= nlmsg_parse(nlh
, sizeof(*rtm
), tb
, RTA_MAX
, rtm_ipv6_policy
);
2988 memset(&fl6
, 0, sizeof(fl6
));
2991 if (nla_len(tb
[RTA_SRC
]) < sizeof(struct in6_addr
))
2994 fl6
.saddr
= *(struct in6_addr
*)nla_data(tb
[RTA_SRC
]);
2998 if (nla_len(tb
[RTA_DST
]) < sizeof(struct in6_addr
))
3001 fl6
.daddr
= *(struct in6_addr
*)nla_data(tb
[RTA_DST
]);
3005 iif
= nla_get_u32(tb
[RTA_IIF
]);
3008 oif
= nla_get_u32(tb
[RTA_OIF
]);
3011 fl6
.flowi6_mark
= nla_get_u32(tb
[RTA_MARK
]);
3014 struct net_device
*dev
;
3017 dev
= __dev_get_by_index(net
, iif
);
3023 fl6
.flowi6_iif
= iif
;
3025 if (!ipv6_addr_any(&fl6
.saddr
))
3026 flags
|= RT6_LOOKUP_F_HAS_SADDR
;
3028 rt
= (struct rt6_info
*)ip6_route_input_lookup(net
, dev
, &fl6
,
3031 fl6
.flowi6_oif
= oif
;
3033 rt
= (struct rt6_info
*)ip6_route_output(net
, NULL
, &fl6
);
3036 skb
= alloc_skb(NLMSG_GOODSIZE
, GFP_KERNEL
);
3043 /* Reserve room for dummy headers, this skb can pass
3044 through good chunk of routing engine.
3046 skb_reset_mac_header(skb
);
3047 skb_reserve(skb
, MAX_HEADER
+ sizeof(struct ipv6hdr
));
3049 skb_dst_set(skb
, &rt
->dst
);
3051 err
= rt6_fill_node(net
, skb
, rt
, &fl6
.daddr
, &fl6
.saddr
, iif
,
3052 RTM_NEWROUTE
, NETLINK_CB(in_skb
).portid
,
3053 nlh
->nlmsg_seq
, 0, 0, 0);
3059 err
= rtnl_unicast(skb
, net
, NETLINK_CB(in_skb
).portid
);
3064 void inet6_rt_notify(int event
, struct rt6_info
*rt
, struct nl_info
*info
)
3066 struct sk_buff
*skb
;
3067 struct net
*net
= info
->nl_net
;
3072 seq
= info
->nlh
? info
->nlh
->nlmsg_seq
: 0;
3074 skb
= nlmsg_new(rt6_nlmsg_size(), gfp_any());
3078 err
= rt6_fill_node(net
, skb
, rt
, NULL
, NULL
, 0,
3079 event
, info
->portid
, seq
, 0, 0, 0);
3081 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
3082 WARN_ON(err
== -EMSGSIZE
);
3086 rtnl_notify(skb
, net
, info
->portid
, RTNLGRP_IPV6_ROUTE
,
3087 info
->nlh
, gfp_any());
3091 rtnl_set_sk_err(net
, RTNLGRP_IPV6_ROUTE
, err
);
3094 static int ip6_route_dev_notify(struct notifier_block
*this,
3095 unsigned long event
, void *ptr
)
3097 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
3098 struct net
*net
= dev_net(dev
);
3100 if (event
== NETDEV_REGISTER
&& (dev
->flags
& IFF_LOOPBACK
)) {
3101 net
->ipv6
.ip6_null_entry
->dst
.dev
= dev
;
3102 net
->ipv6
.ip6_null_entry
->rt6i_idev
= in6_dev_get(dev
);
3103 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3104 net
->ipv6
.ip6_prohibit_entry
->dst
.dev
= dev
;
3105 net
->ipv6
.ip6_prohibit_entry
->rt6i_idev
= in6_dev_get(dev
);
3106 net
->ipv6
.ip6_blk_hole_entry
->dst
.dev
= dev
;
3107 net
->ipv6
.ip6_blk_hole_entry
->rt6i_idev
= in6_dev_get(dev
);
3118 #ifdef CONFIG_PROC_FS
3120 static const struct file_operations ipv6_route_proc_fops
= {
3121 .owner
= THIS_MODULE
,
3122 .open
= ipv6_route_open
,
3124 .llseek
= seq_lseek
,
3125 .release
= seq_release_net
,
3128 static int rt6_stats_seq_show(struct seq_file
*seq
, void *v
)
3130 struct net
*net
= (struct net
*)seq
->private;
3131 seq_printf(seq
, "%04x %04x %04x %04x %04x %04x %04x\n",
3132 net
->ipv6
.rt6_stats
->fib_nodes
,
3133 net
->ipv6
.rt6_stats
->fib_route_nodes
,
3134 net
->ipv6
.rt6_stats
->fib_rt_alloc
,
3135 net
->ipv6
.rt6_stats
->fib_rt_entries
,
3136 net
->ipv6
.rt6_stats
->fib_rt_cache
,
3137 dst_entries_get_slow(&net
->ipv6
.ip6_dst_ops
),
3138 net
->ipv6
.rt6_stats
->fib_discarded_routes
);
3143 static int rt6_stats_seq_open(struct inode
*inode
, struct file
*file
)
3145 return single_open_net(inode
, file
, rt6_stats_seq_show
);
3148 static const struct file_operations rt6_stats_seq_fops
= {
3149 .owner
= THIS_MODULE
,
3150 .open
= rt6_stats_seq_open
,
3152 .llseek
= seq_lseek
,
3153 .release
= single_release_net
,
3155 #endif /* CONFIG_PROC_FS */
3157 #ifdef CONFIG_SYSCTL
3160 int ipv6_sysctl_rtcache_flush(struct ctl_table
*ctl
, int write
,
3161 void __user
*buffer
, size_t *lenp
, loff_t
*ppos
)
3168 net
= (struct net
*)ctl
->extra1
;
3169 delay
= net
->ipv6
.sysctl
.flush_delay
;
3170 proc_dointvec(ctl
, write
, buffer
, lenp
, ppos
);
3171 fib6_run_gc(delay
<= 0 ? 0 : (unsigned long)delay
, net
, delay
> 0);
3175 struct ctl_table ipv6_route_table_template
[] = {
3177 .procname
= "flush",
3178 .data
= &init_net
.ipv6
.sysctl
.flush_delay
,
3179 .maxlen
= sizeof(int),
3181 .proc_handler
= ipv6_sysctl_rtcache_flush
3184 .procname
= "gc_thresh",
3185 .data
= &ip6_dst_ops_template
.gc_thresh
,
3186 .maxlen
= sizeof(int),
3188 .proc_handler
= proc_dointvec
,
3191 .procname
= "max_size",
3192 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_max_size
,
3193 .maxlen
= sizeof(int),
3195 .proc_handler
= proc_dointvec
,
3198 .procname
= "gc_min_interval",
3199 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_gc_min_interval
,
3200 .maxlen
= sizeof(int),
3202 .proc_handler
= proc_dointvec_jiffies
,
3205 .procname
= "gc_timeout",
3206 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_gc_timeout
,
3207 .maxlen
= sizeof(int),
3209 .proc_handler
= proc_dointvec_jiffies
,
3212 .procname
= "gc_interval",
3213 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_gc_interval
,
3214 .maxlen
= sizeof(int),
3216 .proc_handler
= proc_dointvec_jiffies
,
3219 .procname
= "gc_elasticity",
3220 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_gc_elasticity
,
3221 .maxlen
= sizeof(int),
3223 .proc_handler
= proc_dointvec
,
3226 .procname
= "mtu_expires",
3227 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_mtu_expires
,
3228 .maxlen
= sizeof(int),
3230 .proc_handler
= proc_dointvec_jiffies
,
3233 .procname
= "min_adv_mss",
3234 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_min_advmss
,
3235 .maxlen
= sizeof(int),
3237 .proc_handler
= proc_dointvec
,
3240 .procname
= "gc_min_interval_ms",
3241 .data
= &init_net
.ipv6
.sysctl
.ip6_rt_gc_min_interval
,
3242 .maxlen
= sizeof(int),
3244 .proc_handler
= proc_dointvec_ms_jiffies
,
3249 struct ctl_table
* __net_init
ipv6_route_sysctl_init(struct net
*net
)
3251 struct ctl_table
*table
;
3253 table
= kmemdup(ipv6_route_table_template
,
3254 sizeof(ipv6_route_table_template
),
3258 table
[0].data
= &net
->ipv6
.sysctl
.flush_delay
;
3259 table
[0].extra1
= net
;
3260 table
[1].data
= &net
->ipv6
.ip6_dst_ops
.gc_thresh
;
3261 table
[2].data
= &net
->ipv6
.sysctl
.ip6_rt_max_size
;
3262 table
[3].data
= &net
->ipv6
.sysctl
.ip6_rt_gc_min_interval
;
3263 table
[4].data
= &net
->ipv6
.sysctl
.ip6_rt_gc_timeout
;
3264 table
[5].data
= &net
->ipv6
.sysctl
.ip6_rt_gc_interval
;
3265 table
[6].data
= &net
->ipv6
.sysctl
.ip6_rt_gc_elasticity
;
3266 table
[7].data
= &net
->ipv6
.sysctl
.ip6_rt_mtu_expires
;
3267 table
[8].data
= &net
->ipv6
.sysctl
.ip6_rt_min_advmss
;
3268 table
[9].data
= &net
->ipv6
.sysctl
.ip6_rt_gc_min_interval
;
3270 /* Don't export sysctls to unprivileged users */
3271 if (net
->user_ns
!= &init_user_ns
)
3272 table
[0].procname
= NULL
;
3279 static int __net_init
ip6_route_net_init(struct net
*net
)
3283 memcpy(&net
->ipv6
.ip6_dst_ops
, &ip6_dst_ops_template
,
3284 sizeof(net
->ipv6
.ip6_dst_ops
));
3286 if (dst_entries_init(&net
->ipv6
.ip6_dst_ops
) < 0)
3287 goto out_ip6_dst_ops
;
3289 net
->ipv6
.ip6_null_entry
= kmemdup(&ip6_null_entry_template
,
3290 sizeof(*net
->ipv6
.ip6_null_entry
),
3292 if (!net
->ipv6
.ip6_null_entry
)
3293 goto out_ip6_dst_entries
;
3294 net
->ipv6
.ip6_null_entry
->dst
.path
=
3295 (struct dst_entry
*)net
->ipv6
.ip6_null_entry
;
3296 net
->ipv6
.ip6_null_entry
->dst
.ops
= &net
->ipv6
.ip6_dst_ops
;
3297 dst_init_metrics(&net
->ipv6
.ip6_null_entry
->dst
,
3298 ip6_template_metrics
, true);
3300 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3301 net
->ipv6
.ip6_prohibit_entry
= kmemdup(&ip6_prohibit_entry_template
,
3302 sizeof(*net
->ipv6
.ip6_prohibit_entry
),
3304 if (!net
->ipv6
.ip6_prohibit_entry
)
3305 goto out_ip6_null_entry
;
3306 net
->ipv6
.ip6_prohibit_entry
->dst
.path
=
3307 (struct dst_entry
*)net
->ipv6
.ip6_prohibit_entry
;
3308 net
->ipv6
.ip6_prohibit_entry
->dst
.ops
= &net
->ipv6
.ip6_dst_ops
;
3309 dst_init_metrics(&net
->ipv6
.ip6_prohibit_entry
->dst
,
3310 ip6_template_metrics
, true);
3312 net
->ipv6
.ip6_blk_hole_entry
= kmemdup(&ip6_blk_hole_entry_template
,
3313 sizeof(*net
->ipv6
.ip6_blk_hole_entry
),
3315 if (!net
->ipv6
.ip6_blk_hole_entry
)
3316 goto out_ip6_prohibit_entry
;
3317 net
->ipv6
.ip6_blk_hole_entry
->dst
.path
=
3318 (struct dst_entry
*)net
->ipv6
.ip6_blk_hole_entry
;
3319 net
->ipv6
.ip6_blk_hole_entry
->dst
.ops
= &net
->ipv6
.ip6_dst_ops
;
3320 dst_init_metrics(&net
->ipv6
.ip6_blk_hole_entry
->dst
,
3321 ip6_template_metrics
, true);
3324 net
->ipv6
.sysctl
.flush_delay
= 0;
3325 net
->ipv6
.sysctl
.ip6_rt_max_size
= 4096;
3326 net
->ipv6
.sysctl
.ip6_rt_gc_min_interval
= HZ
/ 2;
3327 net
->ipv6
.sysctl
.ip6_rt_gc_timeout
= 60*HZ
;
3328 net
->ipv6
.sysctl
.ip6_rt_gc_interval
= 30*HZ
;
3329 net
->ipv6
.sysctl
.ip6_rt_gc_elasticity
= 9;
3330 net
->ipv6
.sysctl
.ip6_rt_mtu_expires
= 10*60*HZ
;
3331 net
->ipv6
.sysctl
.ip6_rt_min_advmss
= IPV6_MIN_MTU
- 20 - 40;
3333 net
->ipv6
.ip6_rt_gc_expire
= 30*HZ
;
3339 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3340 out_ip6_prohibit_entry
:
3341 kfree(net
->ipv6
.ip6_prohibit_entry
);
3343 kfree(net
->ipv6
.ip6_null_entry
);
3345 out_ip6_dst_entries
:
3346 dst_entries_destroy(&net
->ipv6
.ip6_dst_ops
);
3351 static void __net_exit
ip6_route_net_exit(struct net
*net
)
3353 kfree(net
->ipv6
.ip6_null_entry
);
3354 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3355 kfree(net
->ipv6
.ip6_prohibit_entry
);
3356 kfree(net
->ipv6
.ip6_blk_hole_entry
);
3358 dst_entries_destroy(&net
->ipv6
.ip6_dst_ops
);
3361 static int __net_init
ip6_route_net_init_late(struct net
*net
)
3363 #ifdef CONFIG_PROC_FS
3364 proc_create("ipv6_route", 0, net
->proc_net
, &ipv6_route_proc_fops
);
3365 proc_create("rt6_stats", S_IRUGO
, net
->proc_net
, &rt6_stats_seq_fops
);
3370 static void __net_exit
ip6_route_net_exit_late(struct net
*net
)
3372 #ifdef CONFIG_PROC_FS
3373 remove_proc_entry("ipv6_route", net
->proc_net
);
3374 remove_proc_entry("rt6_stats", net
->proc_net
);
3378 static struct pernet_operations ip6_route_net_ops
= {
3379 .init
= ip6_route_net_init
,
3380 .exit
= ip6_route_net_exit
,
3383 static int __net_init
ipv6_inetpeer_init(struct net
*net
)
3385 struct inet_peer_base
*bp
= kmalloc(sizeof(*bp
), GFP_KERNEL
);
3389 inet_peer_base_init(bp
);
3390 net
->ipv6
.peers
= bp
;
3394 static void __net_exit
ipv6_inetpeer_exit(struct net
*net
)
3396 struct inet_peer_base
*bp
= net
->ipv6
.peers
;
3398 net
->ipv6
.peers
= NULL
;
3399 inetpeer_invalidate_tree(bp
);
3403 static struct pernet_operations ipv6_inetpeer_ops
= {
3404 .init
= ipv6_inetpeer_init
,
3405 .exit
= ipv6_inetpeer_exit
,
3408 static struct pernet_operations ip6_route_net_late_ops
= {
3409 .init
= ip6_route_net_init_late
,
3410 .exit
= ip6_route_net_exit_late
,
3413 static struct notifier_block ip6_route_dev_notifier
= {
3414 .notifier_call
= ip6_route_dev_notify
,
3418 int __init
ip6_route_init(void)
3424 ip6_dst_ops_template
.kmem_cachep
=
3425 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info
), 0,
3426 SLAB_HWCACHE_ALIGN
, NULL
);
3427 if (!ip6_dst_ops_template
.kmem_cachep
)
3430 ret
= dst_entries_init(&ip6_dst_blackhole_ops
);
3432 goto out_kmem_cache
;
3434 ret
= register_pernet_subsys(&ipv6_inetpeer_ops
);
3436 goto out_dst_entries
;
3438 ret
= register_pernet_subsys(&ip6_route_net_ops
);
3440 goto out_register_inetpeer
;
3442 ip6_dst_blackhole_ops
.kmem_cachep
= ip6_dst_ops_template
.kmem_cachep
;
3444 /* Registering of the loopback is done before this portion of code,
3445 * the loopback reference in rt6_info will not be taken, do it
3446 * manually for init_net */
3447 init_net
.ipv6
.ip6_null_entry
->dst
.dev
= init_net
.loopback_dev
;
3448 init_net
.ipv6
.ip6_null_entry
->rt6i_idev
= in6_dev_get(init_net
.loopback_dev
);
3449 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
3450 init_net
.ipv6
.ip6_prohibit_entry
->dst
.dev
= init_net
.loopback_dev
;
3451 init_net
.ipv6
.ip6_prohibit_entry
->rt6i_idev
= in6_dev_get(init_net
.loopback_dev
);
3452 init_net
.ipv6
.ip6_blk_hole_entry
->dst
.dev
= init_net
.loopback_dev
;
3453 init_net
.ipv6
.ip6_blk_hole_entry
->rt6i_idev
= in6_dev_get(init_net
.loopback_dev
);
3457 goto out_register_subsys
;
3463 ret
= fib6_rules_init();
3467 ret
= register_pernet_subsys(&ip6_route_net_late_ops
);
3469 goto fib6_rules_init
;
3472 if (__rtnl_register(PF_INET6
, RTM_NEWROUTE
, inet6_rtm_newroute
, NULL
, NULL
) ||
3473 __rtnl_register(PF_INET6
, RTM_DELROUTE
, inet6_rtm_delroute
, NULL
, NULL
) ||
3474 __rtnl_register(PF_INET6
, RTM_GETROUTE
, inet6_rtm_getroute
, NULL
, NULL
))
3475 goto out_register_late_subsys
;
3477 ret
= register_netdevice_notifier(&ip6_route_dev_notifier
);
3479 goto out_register_late_subsys
;
3481 for_each_possible_cpu(cpu
) {
3482 struct uncached_list
*ul
= per_cpu_ptr(&rt6_uncached_list
, cpu
);
3484 INIT_LIST_HEAD(&ul
->head
);
3485 spin_lock_init(&ul
->lock
);
3491 out_register_late_subsys
:
3492 unregister_pernet_subsys(&ip6_route_net_late_ops
);
3494 fib6_rules_cleanup();
3499 out_register_subsys
:
3500 unregister_pernet_subsys(&ip6_route_net_ops
);
3501 out_register_inetpeer
:
3502 unregister_pernet_subsys(&ipv6_inetpeer_ops
);
3504 dst_entries_destroy(&ip6_dst_blackhole_ops
);
3506 kmem_cache_destroy(ip6_dst_ops_template
.kmem_cachep
);
3510 void ip6_route_cleanup(void)
3512 unregister_netdevice_notifier(&ip6_route_dev_notifier
);
3513 unregister_pernet_subsys(&ip6_route_net_late_ops
);
3514 fib6_rules_cleanup();
3517 unregister_pernet_subsys(&ipv6_inetpeer_ops
);
3518 unregister_pernet_subsys(&ip6_route_net_ops
);
3519 dst_entries_destroy(&ip6_dst_blackhole_ops
);
3520 kmem_cache_destroy(ip6_dst_ops_template
.kmem_cachep
);