/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 */
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif
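/* CONFIG_IP_PIMSM is a file-local convenience symbol: it is defined when
 * either PIM-SM v1 or v2 support is configured, so code shared by both
 * versions can be guarded by a single #ifdef.
 */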
struct mr_table {
    struct list_head    list;
#ifdef CONFIG_NET_NS
    struct net          *net;
#endif
    u32                 id;
    struct sock __rcu   *mroute_sk;
    struct timer_list   ipmr_expire_timer;
    struct list_head    mfc_unres_queue;
    struct list_head    mfc_cache_array[MFC_LINES];
    struct vif_device   vif_table[MAXVIFS];
    int                 maxvif;
    atomic_t            cache_resolve_queue_len;
    bool                mroute_do_assert;
    bool                mroute_do_pim;
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
    int                 mroute_reg_vif_num;
#endif
};

struct ipmr_rule {
    struct fib_rule     common;
};

struct ipmr_result {
    struct mr_table     *mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);

/*
 *  Multicast router control variables
 */

#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;
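/* All mfc_cache entries come from this slab. Resolved entries are freed
 * through call_rcu() (see ipmr_cache_free() below), which is what lets the
 * forwarding path walk mfc_cache_array under nothing stronger than
 * rcu_read_lock().
 */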
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
                          struct sk_buff *skb, struct mfc_cache *cache,
                          int local);
static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                              struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
                                 int cmd);
static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
    list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
    struct mr_table *mrt;

    ipmr_for_each_table(mrt, net) {
        if (mrt->id == id)
            return mrt;
    }
    return NULL;
}
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
{
    struct ipmr_result res;
    struct fib_lookup_arg arg = { .result = &res, };
    int err;

    err = fib_rules_lookup(net->ipv4.mr_rules_ops,
                           flowi4_to_flowi(flp4), 0, &arg);
    if (err < 0)
        return err;
    *mrt = res.mrt;
    return 0;
}
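/* With multiple tables, table selection is delegated to the generic
 * fib-rules engine: each mroute table is addressed by a fib_rule, and
 * ipmr_rule_action() below maps a matching rule back to its mr_table.
 */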
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
                            int flags, struct fib_lookup_arg *arg)
{
    struct ipmr_result *res = arg->result;
    struct mr_table *mrt;

    switch (rule->action) {
    case FR_ACT_TO_TBL:
        break;
    case FR_ACT_UNREACHABLE:
        return -ENETUNREACH;
    case FR_ACT_PROHIBIT:
        return -EACCES;
    case FR_ACT_BLACKHOLE:
    default:
        return -EINVAL;
    }

    mrt = ipmr_get_table(rule->fr_net, rule->table);
    if (mrt == NULL)
        return -EAGAIN;
    res->mrt = mrt;
    return 0;
}
static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
    return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
    FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
                               struct fib_rule_hdr *frh, struct nlattr **tb)
{
    return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
                             struct nlattr **tb)
{
    return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
                          struct fib_rule_hdr *frh)
{
    frh->dst_len = 0;
    frh->src_len = 0;
    frh->tos     = 0;
    return 0;
}
static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
    .family         = RTNL_FAMILY_IPMR,
    .rule_size      = sizeof(struct ipmr_rule),
    .addr_size      = sizeof(u32),
    .action         = ipmr_rule_action,
    .match          = ipmr_rule_match,
    .configure      = ipmr_rule_configure,
    .compare        = ipmr_rule_compare,
    .default_pref   = fib_default_rule_pref,
    .fill           = ipmr_rule_fill,
    .nlgroup        = RTNLGRP_IPV4_RULE,
    .policy         = ipmr_rule_policy,
    .owner          = THIS_MODULE,
};
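/* fib_rules_register() duplicates this template for each network namespace,
 * so the resulting ops are per-netns state even though the template itself
 * is const.
 */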
static int __net_init ipmr_rules_init(struct net *net)
{
    struct fib_rules_ops *ops;
    struct mr_table *mrt;
    int err;

    ops = fib_rules_register(&ipmr_rules_ops_template, net);
    if (IS_ERR(ops))
        return PTR_ERR(ops);

    INIT_LIST_HEAD(&net->ipv4.mr_tables);

    mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
    if (mrt == NULL) {
        err = -ENOMEM;
        goto err1;
    }

    err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
    if (err < 0)
        goto err2;

    net->ipv4.mr_rules_ops = ops;
    return 0;

err2:
    ipmr_free_table(mrt);
err1:
    fib_rules_unregister(ops);
    return err;
}
static void __net_exit ipmr_rules_exit(struct net *net)
{
    struct mr_table *mrt, *next;

    list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
        list_del(&mrt->list);
        ipmr_free_table(mrt);
    }
    fib_rules_unregister(net->ipv4.mr_rules_ops);
}
#else
#define ipmr_for_each_table(mrt, net) \
    for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
    return net->ipv4.mrt;
}
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
{
    *mrt = net->ipv4.mrt;
    return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
    net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
    return net->ipv4.mrt ? 0 : -ENOMEM;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
    ipmr_free_table(net->ipv4.mrt);
}
#endif
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
    struct mr_table *mrt;
    unsigned int i;

    mrt = ipmr_get_table(net, id);
    if (mrt != NULL)
        return mrt;

    mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
    if (mrt == NULL)
        return NULL;
    write_pnet(&mrt->net, net);
    mrt->id = id;

    /* Forwarding cache */
    for (i = 0; i < MFC_LINES; i++)
        INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

    INIT_LIST_HEAD(&mrt->mfc_unres_queue);

    setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
                (unsigned long)mrt);

#ifdef CONFIG_IP_PIMSM
    mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
    list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
    return mrt;
}
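/* A new table becomes visible to RCU readers via list_add_tail_rcu() before
 * this function returns; writers are serialised because all callers run
 * under RTNL.
 */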
static void ipmr_free_table(struct mr_table *mrt)
{
    del_timer_sync(&mrt->ipmr_expire_timer);
    mroute_clean_tables(mrt);
    kfree(mrt);
}
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
    struct net *net = dev_net(dev);

    dev_close(dev);

    dev = __dev_get_by_name(net, "tunl0");
    if (dev) {
        const struct net_device_ops *ops = dev->netdev_ops;
        struct ifreq ifr;
        struct ip_tunnel_parm p;

        memset(&p, 0, sizeof(p));
        p.iph.daddr = v->vifc_rmt_addr.s_addr;
        p.iph.saddr = v->vifc_lcl_addr.s_addr;
        p.iph.version = 4;
        p.iph.ihl = 5;
        p.iph.protocol = IPPROTO_IPIP;
        sprintf(p.name, "dvmrp%d", v->vifc_vifi);
        ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

        if (ops->ndo_do_ioctl) {
            mm_segment_t oldfs = get_fs();

            set_fs(KERNEL_DS);
            ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
            set_fs(oldfs);
        }
    }
}
static
struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
    struct net_device *dev;

    dev = __dev_get_by_name(net, "tunl0");

    if (dev) {
        const struct net_device_ops *ops = dev->netdev_ops;
        int err;
        struct ifreq ifr;
        struct ip_tunnel_parm p;
        struct in_device *in_dev;

        memset(&p, 0, sizeof(p));
        p.iph.daddr = v->vifc_rmt_addr.s_addr;
        p.iph.saddr = v->vifc_lcl_addr.s_addr;
        p.iph.version = 4;
        p.iph.ihl = 5;
        p.iph.protocol = IPPROTO_IPIP;
        sprintf(p.name, "dvmrp%d", v->vifc_vifi);
        ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

        if (ops->ndo_do_ioctl) {
            mm_segment_t oldfs = get_fs();

            set_fs(KERNEL_DS);
            err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
            set_fs(oldfs);
        } else {
            err = -EOPNOTSUPP;
        }
        dev = NULL;

        if (err == 0 &&
            (dev = __dev_get_by_name(net, p.name)) != NULL) {
            dev->flags |= IFF_MULTICAST;

            in_dev = __in_dev_get_rtnl(dev);
            if (in_dev == NULL)
                goto failure;

            ipv4_devconf_setall(in_dev);
            neigh_parms_data_state_setall(in_dev->arp_parms);
            IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

            if (dev_open(dev))
                goto failure;
            dev_hold(dev);
        }
    }
    return dev;

failure:
    /* allow the register to be completed before unregistering. */
    rtnl_unlock();
    rtnl_lock();

    unregister_netdevice(dev);
    return NULL;
}
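/* The "dvmrp%d" devices above are ordinary ipip tunnels: rather than talk to
 * the tunnel driver directly, the code fakes the SIOCADDTUNNEL/SIOCDELTUNNEL
 * ioctls a user would issue, temporarily lifting the user-copy address check
 * with set_fs(KERNEL_DS) so the driver can read the ip_tunnel_parm block
 * from kernel memory.
 */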
#ifdef CONFIG_IP_PIMSM

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct net *net = dev_net(dev);
    struct mr_table *mrt;
    struct flowi4 fl4 = {
        .flowi4_oif  = dev->ifindex,
        .flowi4_iif  = skb->skb_iif,
        .flowi4_mark = skb->mark,
    };
    int err;

    err = ipmr_fib_lookup(net, &fl4, &mrt);
    if (err < 0) {
        kfree_skb(skb);
        return err;
    }

    read_lock(&mrt_lock);
    dev->stats.tx_bytes += skb->len;
    dev->stats.tx_packets++;
    ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
    read_unlock(&mrt_lock);
    kfree_skb(skb);
    return NETDEV_TX_OK;
}
static const struct net_device_ops reg_vif_netdev_ops = {
    .ndo_start_xmit = reg_vif_xmit,
};
static void reg_vif_setup(struct net_device *dev)
{
    dev->type       = ARPHRD_PIMREG;
    dev->mtu        = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
    dev->flags      = IFF_NOARP;
    dev->netdev_ops = &reg_vif_netdev_ops;
    dev->destructor = free_netdev;
    dev->features   |= NETIF_F_NETNS_LOCAL;
}
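/* The pimreg MTU leaves room for the outer IP header plus 8 further bytes,
 * corresponding to the PIM register header prepended when packets are
 * tunnelled to the rendezvous point.
 */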
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
    struct net_device *dev;
    struct in_device *in_dev;
    char name[IFNAMSIZ];

    if (mrt->id == RT_TABLE_DEFAULT)
        sprintf(name, "pimreg");
    else
        sprintf(name, "pimreg%u", mrt->id);

    dev = alloc_netdev(0, name, reg_vif_setup);

    if (dev == NULL)
        return NULL;

    dev_net_set(dev, net);

    if (register_netdevice(dev)) {
        free_netdev(dev);
        return NULL;
    }
    dev->iflink = 0;

    rcu_read_lock();
    in_dev = __in_dev_get_rcu(dev);
    if (!in_dev) {
        rcu_read_unlock();
        goto failure;
    }

    ipv4_devconf_setall(in_dev);
    neigh_parms_data_state_setall(in_dev->arp_parms);
    IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
    rcu_read_unlock();

    if (dev_open(dev))
        goto failure;

    dev_hold(dev);

    return dev;

failure:
    /* allow the register to be completed before unregistering. */
    rtnl_unlock();
    rtnl_lock();

    unregister_netdevice(dev);
    return NULL;
}
#endif
/**
 *	vif_delete - Delete a VIF entry
 *	@notify: Set to 1, if the caller is a notifier_call
 */

static int vif_delete(struct mr_table *mrt, int vifi, int notify,
                      struct list_head *head)
{
    struct vif_device *v;
    struct net_device *dev;
    struct in_device *in_dev;

    if (vifi < 0 || vifi >= mrt->maxvif)
        return -EADDRNOTAVAIL;

    v = &mrt->vif_table[vifi];

    write_lock_bh(&mrt_lock);
    dev = v->dev;
    v->dev = NULL;

    if (!dev) {
        write_unlock_bh(&mrt_lock);
        return -EADDRNOTAVAIL;
    }

#ifdef CONFIG_IP_PIMSM
    if (vifi == mrt->mroute_reg_vif_num)
        mrt->mroute_reg_vif_num = -1;
#endif

    if (vifi + 1 == mrt->maxvif) {
        int tmp;

        for (tmp = vifi - 1; tmp >= 0; tmp--) {
            if (VIF_EXISTS(mrt, tmp))
                break;
        }
        mrt->maxvif = tmp + 1;
    }

    write_unlock_bh(&mrt_lock);

    dev_set_allmulti(dev, -1);

    in_dev = __in_dev_get_rtnl(dev);
    if (in_dev) {
        IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
        inet_netconf_notify_devconf(dev_net(dev),
                                    NETCONFA_MC_FORWARDING,
                                    dev->ifindex, &in_dev->cnf);
        ip_rt_multicast_event(in_dev);
    }

    if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
        unregister_netdevice_queue(dev, head);

    dev_put(dev);
    return 0;
}
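/* Callers hold RTNL. When a list head is supplied, the device unregister is
 * only queued (unregister_netdevice_queue()) so that callers tearing down
 * many VIFs, e.g. mroute_clean_tables(), can batch the expensive teardown
 * into one unregister_netdevice_many() call.
 */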
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
    struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

    kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
    call_rcu(&c->rcu, ipmr_cache_free_rcu);
}
/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
    struct net *net = read_pnet(&mrt->net);
    struct sk_buff *skb;
    struct nlmsgerr *e;

    atomic_dec(&mrt->cache_resolve_queue_len);

    while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
        if (ip_hdr(skb)->version == 0) {
            struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
            nlh->nlmsg_type = NLMSG_ERROR;
            nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
            skb_trim(skb, nlh->nlmsg_len);
            e = nlmsg_data(nlh);
            e->error = -ETIMEDOUT;
            memset(&e->msg, 0, sizeof(e->msg));

            rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
        } else {
            kfree_skb(skb);
        }
    }

    ipmr_cache_free(c);
}
/* Timer process for the unresolved queue. */

static void ipmr_expire_process(unsigned long arg)
{
    struct mr_table *mrt = (struct mr_table *)arg;
    unsigned long now;
    unsigned long expires;
    struct mfc_cache *c, *next;

    if (!spin_trylock(&mfc_unres_lock)) {
        mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ/10);
        return;
    }

    if (list_empty(&mrt->mfc_unres_queue))
        goto out;

    now = jiffies;
    expires = 10*HZ;

    list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
        if (time_after(c->mfc_un.unres.expires, now)) {
            unsigned long interval = c->mfc_un.unres.expires - now;
            if (interval < expires)
                expires = interval;
            continue;
        }

        list_del(&c->list);
        mroute_netlink_event(mrt, c, RTM_DELROUTE);
        ipmr_destroy_unres(mrt, c);
    }

    if (!list_empty(&mrt->mfc_unres_queue))
        mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
    spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
                                   unsigned char *ttls)
{
    int vifi;

    cache->mfc_un.res.minvif = MAXVIFS;
    cache->mfc_un.res.maxvif = 0;
    memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

    for (vifi = 0; vifi < mrt->maxvif; vifi++) {
        if (VIF_EXISTS(mrt, vifi) &&
            ttls[vifi] && ttls[vifi] < 255) {
            cache->mfc_un.res.ttls[vifi] = ttls[vifi];
            if (cache->mfc_un.res.minvif > vifi)
                cache->mfc_un.res.minvif = vifi;
            if (cache->mfc_un.res.maxvif <= vifi)
                cache->mfc_un.res.maxvif = vifi + 1;
        }
    }
}
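/* A TTL of 255 in ttls[] means "do not forward out this VIF"; minvif and
 * maxvif simply bound the range that ip_mr_forward() has to scan.
 */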
static int vif_add(struct net *net, struct mr_table *mrt,
                   struct vifctl *vifc, int mrtsock)
{
    int vifi = vifc->vifc_vifi;
    struct vif_device *v = &mrt->vif_table[vifi];
    struct net_device *dev;
    struct in_device *in_dev;
    int err;

    /* Is vif busy ? */
    if (VIF_EXISTS(mrt, vifi))
        return -EADDRINUSE;

    switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
    case VIFF_REGISTER:
        /*
         * Special Purpose VIF in PIM
         * All the packets will be sent to the daemon
         */
        if (mrt->mroute_reg_vif_num >= 0)
            return -EADDRINUSE;
        dev = ipmr_reg_vif(net, mrt);
        if (!dev)
            return -ENOBUFS;
        err = dev_set_allmulti(dev, 1);
        if (err) {
            unregister_netdevice(dev);
            dev_put(dev);
            return err;
        }
        break;
#endif
    case VIFF_TUNNEL:
        dev = ipmr_new_tunnel(net, vifc);
        if (!dev)
            return -ENOBUFS;
        err = dev_set_allmulti(dev, 1);
        if (err) {
            ipmr_del_tunnel(dev, vifc);
            dev_put(dev);
            return err;
        }
        break;

    case VIFF_USE_IFINDEX:
    case 0:
        if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
            dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
            if (dev && __in_dev_get_rtnl(dev) == NULL) {
                dev_put(dev);
                return -EADDRNOTAVAIL;
            }
        } else {
            dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
        }
        if (!dev)
            return -EADDRNOTAVAIL;
        err = dev_set_allmulti(dev, 1);
        if (err) {
            dev_put(dev);
            return err;
        }
        break;
    default:
        return -EINVAL;
    }

    in_dev = __in_dev_get_rtnl(dev);
    if (!in_dev) {
        dev_put(dev);
        return -EADDRNOTAVAIL;
    }
    IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
    inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
                                &in_dev->cnf);
    ip_rt_multicast_event(in_dev);

    /* Fill in the VIF structures */

    v->rate_limit = vifc->vifc_rate_limit;
    v->local = vifc->vifc_lcl_addr.s_addr;
    v->remote = vifc->vifc_rmt_addr.s_addr;
    v->flags = vifc->vifc_flags;
    if (!mrtsock)
        v->flags |= VIFF_STATIC;
    v->threshold = vifc->vifc_threshold;
    v->bytes_in = 0;
    v->bytes_out = 0;
    v->pkt_in = 0;
    v->pkt_out = 0;
    v->link = dev->ifindex;
    if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
        v->link = dev->iflink;

    /* And finish update writing critical data */
    write_lock_bh(&mrt_lock);
    v->dev = dev;
#ifdef CONFIG_IP_PIMSM
    if (v->flags & VIFF_REGISTER)
        mrt->mroute_reg_vif_num = vifi;
#endif
    if (vifi + 1 > mrt->maxvif)
        mrt->maxvif = vifi + 1;
    write_unlock_bh(&mrt_lock);
    return 0;
}
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
                                         __be32 origin,
                                         __be32 mcastgrp)
{
    int line = MFC_HASH(mcastgrp, origin);
    struct mfc_cache *c;

    list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
        if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
            return c;
    }
    return NULL;
}
/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
                                                    int vifi)
{
    int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
    struct mfc_cache *c;

    list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
        if (c->mfc_origin == htonl(INADDR_ANY) &&
            c->mfc_mcastgrp == htonl(INADDR_ANY) &&
            c->mfc_un.res.ttls[vifi] < 255)
            return c;

    return NULL;
}
/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
                                             __be32 mcastgrp, int vifi)
{
    int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
    struct mfc_cache *c, *proxy;

    if (mcastgrp == htonl(INADDR_ANY))
        goto skip;

    list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
        if (c->mfc_origin == htonl(INADDR_ANY) &&
            c->mfc_mcastgrp == mcastgrp) {
            if (c->mfc_un.res.ttls[vifi] < 255)
                return c;

            /* It's ok if the vifi is part of the static tree */
            proxy = ipmr_cache_find_any_parent(mrt,
                                               c->mfc_parent);
            if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
                return c;
        }

skip:
    return ipmr_cache_find_any_parent(mrt, vifi);
}
/*
 *  Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
    struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

    if (c)
        c->mfc_un.res.minvif = MAXVIFS;
    return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
    struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

    if (c) {
        skb_queue_head_init(&c->mfc_un.unres.unresolved);
        c->mfc_un.unres.expires = jiffies + 10*HZ;
    }
    return c;
}
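/* Resolved entries are created from process context (the daemon's
 * setsockopt), hence GFP_KERNEL; unresolved entries are created from the
 * packet path, hence GFP_ATOMIC in ipmr_cache_alloc_unres().
 */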
/*
 *  A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
                               struct mfc_cache *uc, struct mfc_cache *c)
{
    struct sk_buff *skb;
    struct nlmsgerr *e;

    /* Play the pending entries through our router */

    while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
        if (ip_hdr(skb)->version == 0) {
            struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

            if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
                nlh->nlmsg_len = skb_tail_pointer(skb) -
                                 (u8 *)nlh;
            } else {
                nlh->nlmsg_type = NLMSG_ERROR;
                nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
                skb_trim(skb, nlh->nlmsg_len);
                e = nlmsg_data(nlh);
                e->error = -EMSGSIZE;
                memset(&e->msg, 0, sizeof(e->msg));
            }

            rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
        } else {
            ip_mr_forward(net, mrt, skb, c, 0);
        }
    }
}
/*
 *  Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *  expects the following bizarre scheme.
 *
 *  Called under mrt_lock.
 */

static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert)
{
    struct sk_buff *skb;
    const int ihl = ip_hdrlen(pkt);
    struct igmphdr *igmp;
    struct igmpmsg *msg;
    struct sock *mroute_sk;
    int ret;

#ifdef CONFIG_IP_PIMSM
    if (assert == IGMPMSG_WHOLEPKT)
        skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
    else
#endif
        skb = alloc_skb(128, GFP_ATOMIC);

    if (!skb)
        return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
    if (assert == IGMPMSG_WHOLEPKT) {
        /* Ugly, but we have no choice with this interface.
         * Duplicate old header, fix ihl, length etc.
         * And all this only to mangle msg->im_msgtype and
         * to set msg->im_mbz to "mbz" :-)
         */
        skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        msg = (struct igmpmsg *)skb_network_header(skb);
        memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
        msg->im_msgtype = IGMPMSG_WHOLEPKT;
        msg->im_mbz = 0;
        msg->im_vif = mrt->mroute_reg_vif_num;
        ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
        ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
                                     sizeof(struct iphdr));
    } else
#endif
    {

    /* Copy the IP header */

    skb_set_network_header(skb, skb->len);
    skb_put(skb, ihl);
    skb_copy_to_linear_data(skb, pkt->data, ihl);
    ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */
    msg = (struct igmpmsg *)skb_network_header(skb);
    msg->im_vif = vifi;
    skb_dst_set(skb, dst_clone(skb_dst(pkt)));

    /* Add our header */

    igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
    igmp->type =
    msg->im_msgtype = assert;
    igmp->code = 0;
    ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
    skb->transport_header = skb->network_header;
    }

    rcu_read_lock();
    mroute_sk = rcu_dereference(mrt->mroute_sk);
    if (mroute_sk == NULL) {
        rcu_read_unlock();
        kfree_skb(skb);
        return -EINVAL;
    }

    /* Deliver to mrouted */

    ret = sock_queue_rcv_skb(mroute_sk, skb);
    rcu_read_unlock();
    if (ret < 0) {
        net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
        kfree_skb(skb);
    }

    return ret;
}
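/* For reference, the upcall overlays the following structure (from
 * <linux/mroute.h>) on what looks like an IP header, which is why the code
 * above can smuggle the message type and VIF number through the header of
 * a raw IGMP packet:
 *
 *	struct igmpmsg {
 *		__u32 unused1, unused2;
 *		unsigned char im_msgtype;	// IGMPMSG_NOCACHE etc.
 *		unsigned char im_mbz;		// must be zero
 *		unsigned char im_vif;
 *		unsigned char unused3;
 *		struct in_addr im_src, im_dst;
 *	};
 */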
/*
 *  Queue a packet for resolution. It gets locked cache entry!
 */

static int
ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
{
    bool found = false;
    int err;
    struct mfc_cache *c;
    const struct iphdr *iph = ip_hdr(skb);

    spin_lock_bh(&mfc_unres_lock);
    list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
        if (c->mfc_mcastgrp == iph->daddr &&
            c->mfc_origin == iph->saddr) {
            found = true;
            break;
        }
    }

    if (!found) {
        /* Create a new entry if allowable */

        if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
            (c = ipmr_cache_alloc_unres()) == NULL) {
            spin_unlock_bh(&mfc_unres_lock);

            kfree_skb(skb);
            return -ENOBUFS;
        }

        /* Fill in the new cache entry */

        c->mfc_parent = -1;
        c->mfc_origin = iph->saddr;
        c->mfc_mcastgrp = iph->daddr;

        /* Reflect first query at mrouted. */

        err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
        if (err < 0) {
            /* If the report failed throw the cache entry
             * out - Brad Parker
             */
            spin_unlock_bh(&mfc_unres_lock);

            ipmr_cache_free(c);
            kfree_skb(skb);
            return err;
        }

        atomic_inc(&mrt->cache_resolve_queue_len);
        list_add(&c->list, &mrt->mfc_unres_queue);
        mroute_netlink_event(mrt, c, RTM_NEWROUTE);

        if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
            mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
    }

    /* See if we can append the packet */

    if (c->mfc_un.unres.unresolved.qlen > 3) {
        kfree_skb(skb);
        err = -ENOBUFS;
    } else {
        skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
        err = 0;
    }

    spin_unlock_bh(&mfc_unres_lock);
    return err;
}
/*
 *  MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
    int line;
    struct mfc_cache *c, *next;

    line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

    list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
        if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
            c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
            (parent == -1 || parent == c->mfc_parent)) {
            list_del_rcu(&c->list);
            mroute_netlink_event(mrt, c, RTM_DELROUTE);
            ipmr_cache_free(c);
            return 0;
        }
    }
    return -ENOENT;
}
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
                        struct mfcctl *mfc, int mrtsock, int parent)
{
    bool found = false;
    int line;
    struct mfc_cache *uc, *c;

    if (mfc->mfcc_parent >= MAXVIFS)
        return -ENFILE;

    line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

    list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
        if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
            c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
            (parent == -1 || parent == c->mfc_parent)) {
            found = true;
            break;
        }
    }

    if (found) {
        write_lock_bh(&mrt_lock);
        c->mfc_parent = mfc->mfcc_parent;
        ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
        if (!mrtsock)
            c->mfc_flags |= MFC_STATIC;
        write_unlock_bh(&mrt_lock);
        mroute_netlink_event(mrt, c, RTM_NEWROUTE);
        return 0;
    }

    if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
        !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
        return -EINVAL;

    c = ipmr_cache_alloc();
    if (c == NULL)
        return -ENOMEM;

    c->mfc_origin = mfc->mfcc_origin.s_addr;
    c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
    c->mfc_parent = mfc->mfcc_parent;
    ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
    if (!mrtsock)
        c->mfc_flags |= MFC_STATIC;

    list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

    /*
     *  Check to see if we resolved a queued list. If so we
     *  need to send on the frames and tidy up.
     */
    found = false;
    spin_lock_bh(&mfc_unres_lock);
    list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
        if (uc->mfc_origin == c->mfc_origin &&
            uc->mfc_mcastgrp == c->mfc_mcastgrp) {
            list_del(&uc->list);
            atomic_dec(&mrt->cache_resolve_queue_len);
            found = true;
            break;
        }
    }
    if (list_empty(&mrt->mfc_unres_queue))
        del_timer(&mrt->ipmr_expire_timer);
    spin_unlock_bh(&mfc_unres_lock);

    if (found) {
        ipmr_cache_resolve(net, mrt, uc, c);
        ipmr_cache_free(uc);
    }
    mroute_netlink_event(mrt, c, RTM_NEWROUTE);
    return 0;
}
/*
 *  Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt)
{
    int i;
    LIST_HEAD(list);
    struct mfc_cache *c, *next;

    /* Shut down all active vif entries */

    for (i = 0; i < mrt->maxvif; i++) {
        if (!(mrt->vif_table[i].flags & VIFF_STATIC))
            vif_delete(mrt, i, 0, &list);
    }
    unregister_netdevice_many(&list);

    /* Wipe the cache */

    for (i = 0; i < MFC_LINES; i++) {
        list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
            if (c->mfc_flags & MFC_STATIC)
                continue;
            list_del_rcu(&c->list);
            mroute_netlink_event(mrt, c, RTM_DELROUTE);
            ipmr_cache_free(c);
        }
    }

    if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
            list_del(&c->list);
            mroute_netlink_event(mrt, c, RTM_DELROUTE);
            ipmr_destroy_unres(mrt, c);
        }
        spin_unlock_bh(&mfc_unres_lock);
    }
}
/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
    struct net *net = sock_net(sk);
    struct mr_table *mrt;

    rtnl_lock();
    ipmr_for_each_table(mrt, net) {
        if (sk == rtnl_dereference(mrt->mroute_sk)) {
            IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
            inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
                                        NETCONFA_IFINDEX_ALL,
                                        net->ipv4.devconf_all);
            RCU_INIT_POINTER(mrt->mroute_sk, NULL);
            mroute_clean_tables(mrt);
        }
    }
    rtnl_unlock();
}
/*
 *  Socket options and virtual interface manipulation. The whole
 *  virtual interface system is a complete heap, but unfortunately
 *  that's how BSD mrouted happens to think. Maybe one day with a proper
 *  MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
    int ret, parent = 0;
    struct vifctl vif;
    struct mfcctl mfc;
    struct net *net = sock_net(sk);
    struct mr_table *mrt;

    if (sk->sk_type != SOCK_RAW ||
        inet_sk(sk)->inet_num != IPPROTO_IGMP)
        return -EOPNOTSUPP;

    mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
    if (mrt == NULL)
        return -ENOENT;

    if (optname != MRT_INIT) {
        if (sk != rcu_access_pointer(mrt->mroute_sk) &&
            !ns_capable(net->user_ns, CAP_NET_ADMIN))
            return -EACCES;
    }

    switch (optname) {
    case MRT_INIT:
        if (optlen != sizeof(int))
            return -EINVAL;

        rtnl_lock();
        if (rtnl_dereference(mrt->mroute_sk)) {
            rtnl_unlock();
            return -EADDRINUSE;
        }

        ret = ip_ra_control(sk, 1, mrtsock_destruct);
        if (ret == 0) {
            rcu_assign_pointer(mrt->mroute_sk, sk);
            IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
            inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
                                        NETCONFA_IFINDEX_ALL,
                                        net->ipv4.devconf_all);
        }
        rtnl_unlock();
        return ret;
    case MRT_DONE:
        if (sk != rcu_access_pointer(mrt->mroute_sk))
            return -EACCES;
        return ip_ra_control(sk, 0, NULL);
    case MRT_ADD_VIF:
    case MRT_DEL_VIF:
        if (optlen != sizeof(vif))
            return -EINVAL;
        if (copy_from_user(&vif, optval, sizeof(vif)))
            return -EFAULT;
        if (vif.vifc_vifi >= MAXVIFS)
            return -ENFILE;
        rtnl_lock();
        if (optname == MRT_ADD_VIF) {
            ret = vif_add(net, mrt, &vif,
                          sk == rtnl_dereference(mrt->mroute_sk));
        } else {
            ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
        }
        rtnl_unlock();
        return ret;

        /*
         *  Manipulate the forwarding caches. These live
         *  in a sort of kernel/user symbiosis.
         */
    case MRT_ADD_MFC:
    case MRT_DEL_MFC:
        parent = -1;
    case MRT_ADD_MFC_PROXY:
    case MRT_DEL_MFC_PROXY:
        if (optlen != sizeof(mfc))
            return -EINVAL;
        if (copy_from_user(&mfc, optval, sizeof(mfc)))
            return -EFAULT;
        if (parent == 0)
            parent = mfc.mfcc_parent;
        rtnl_lock();
        if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
            ret = ipmr_mfc_delete(mrt, &mfc, parent);
        else
            ret = ipmr_mfc_add(net, mrt, &mfc,
                               sk == rtnl_dereference(mrt->mroute_sk),
                               parent);
        rtnl_unlock();
        return ret;
        /*
         *  Control PIM assert.
         */
    case MRT_ASSERT:
    {
        int v;

        if (optlen != sizeof(v))
            return -EINVAL;
        if (get_user(v, (int __user *)optval))
            return -EFAULT;
        mrt->mroute_do_assert = v;
        return 0;
    }
#ifdef CONFIG_IP_PIMSM
    case MRT_PIM:
    {
        int v;

        if (optlen != sizeof(v))
            return -EINVAL;
        if (get_user(v, (int __user *)optval))
            return -EFAULT;
        v = !!v;

        rtnl_lock();
        ret = 0;
        if (v != mrt->mroute_do_pim) {
            mrt->mroute_do_pim = v;
            mrt->mroute_do_assert = v;
        }
        rtnl_unlock();
        return ret;
    }
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
    case MRT_TABLE:
    {
        u32 v;

        if (optlen != sizeof(u32))
            return -EINVAL;
        if (get_user(v, (u32 __user *)optval))
            return -EFAULT;

        /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
        if (v != RT_TABLE_DEFAULT && v >= 1000000000)
            return -EINVAL;

        rtnl_lock();
        ret = 0;
        if (sk == rtnl_dereference(mrt->mroute_sk)) {
            ret = -EBUSY;
        } else {
            if (!ipmr_new_table(net, v))
                ret = -ENOMEM;
            else
                raw_sk(sk)->ipmr_table = v;
        }
        rtnl_unlock();
        return ret;
    }
#endif
    /*
     *  Spurious command, or MRT_VERSION which you cannot
     *  set.
     */
    default:
        return -ENOPROTOOPT;
    }
}
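/* Typical daemon usage, sketched from the API above (userspace, not part of
 * this file):
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &on, sizeof(on));
 *	// one MRT_ADD_VIF with a struct vifctl per interface,
 *	// one MRT_ADD_MFC with a struct mfcctl per (S,G) route ...
 *	setsockopt(s, IPPROTO_IP, MRT_DONE, NULL, 0);
 */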
/*
 *  Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
    int olr;
    int val;
    struct net *net = sock_net(sk);
    struct mr_table *mrt;

    if (sk->sk_type != SOCK_RAW ||
        inet_sk(sk)->inet_num != IPPROTO_IGMP)
        return -EOPNOTSUPP;

    mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
    if (mrt == NULL)
        return -ENOENT;

    if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
        optname != MRT_PIM &&
#endif
        optname != MRT_ASSERT)
        return -ENOPROTOOPT;

    if (get_user(olr, optlen))
        return -EFAULT;

    olr = min_t(unsigned int, olr, sizeof(int));
    if (olr < 0)
        return -EINVAL;

    if (put_user(olr, optlen))
        return -EFAULT;
    if (optname == MRT_VERSION)
        val = 0x0305;
#ifdef CONFIG_IP_PIMSM
    else if (optname == MRT_PIM)
        val = mrt->mroute_do_pim;
#endif
    else
        val = mrt->mroute_do_assert;
    if (copy_to_user(optval, &val, olr))
        return -EFAULT;
    return 0;
}
/*
 *  The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
    struct sioc_sg_req sr;
    struct sioc_vif_req vr;
    struct vif_device *vif;
    struct mfc_cache *c;
    struct net *net = sock_net(sk);
    struct mr_table *mrt;

    mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
    if (mrt == NULL)
        return -ENOENT;

    switch (cmd) {
    case SIOCGETVIFCNT:
        if (copy_from_user(&vr, arg, sizeof(vr)))
            return -EFAULT;
        if (vr.vifi >= mrt->maxvif)
            return -EINVAL;
        read_lock(&mrt_lock);
        vif = &mrt->vif_table[vr.vifi];
        if (VIF_EXISTS(mrt, vr.vifi)) {
            vr.icount = vif->pkt_in;
            vr.ocount = vif->pkt_out;
            vr.ibytes = vif->bytes_in;
            vr.obytes = vif->bytes_out;
            read_unlock(&mrt_lock);

            if (copy_to_user(arg, &vr, sizeof(vr)))
                return -EFAULT;
            return 0;
        }
        read_unlock(&mrt_lock);
        return -EADDRNOTAVAIL;
    case SIOCGETSGCNT:
        if (copy_from_user(&sr, arg, sizeof(sr)))
            return -EFAULT;

        rcu_read_lock();
        c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
        if (c) {
            sr.pktcnt = c->mfc_un.res.pkt;
            sr.bytecnt = c->mfc_un.res.bytes;
            sr.wrong_if = c->mfc_un.res.wrong_if;
            rcu_read_unlock();

            if (copy_to_user(arg, &sr, sizeof(sr)))
                return -EFAULT;
            return 0;
        }
        rcu_read_unlock();
        return -EADDRNOTAVAIL;
    default:
        return -ENOIOCTLCMD;
    }
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
    struct in_addr src;
    struct in_addr grp;
    compat_ulong_t pktcnt;
    compat_ulong_t bytecnt;
    compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
    vifi_t vifi;            /* Which iface */
    compat_ulong_t icount;
    compat_ulong_t ocount;
    compat_ulong_t ibytes;
    compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
    struct compat_sioc_sg_req sr;
    struct compat_sioc_vif_req vr;
    struct vif_device *vif;
    struct mfc_cache *c;
    struct net *net = sock_net(sk);
    struct mr_table *mrt;

    mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
    if (mrt == NULL)
        return -ENOENT;

    switch (cmd) {
    case SIOCGETVIFCNT:
        if (copy_from_user(&vr, arg, sizeof(vr)))
            return -EFAULT;
        if (vr.vifi >= mrt->maxvif)
            return -EINVAL;
        read_lock(&mrt_lock);
        vif = &mrt->vif_table[vr.vifi];
        if (VIF_EXISTS(mrt, vr.vifi)) {
            vr.icount = vif->pkt_in;
            vr.ocount = vif->pkt_out;
            vr.ibytes = vif->bytes_in;
            vr.obytes = vif->bytes_out;
            read_unlock(&mrt_lock);

            if (copy_to_user(arg, &vr, sizeof(vr)))
                return -EFAULT;
            return 0;
        }
        read_unlock(&mrt_lock);
        return -EADDRNOTAVAIL;
    case SIOCGETSGCNT:
        if (copy_from_user(&sr, arg, sizeof(sr)))
            return -EFAULT;

        rcu_read_lock();
        c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
        if (c) {
            sr.pktcnt = c->mfc_un.res.pkt;
            sr.bytecnt = c->mfc_un.res.bytes;
            sr.wrong_if = c->mfc_un.res.wrong_if;
            rcu_read_unlock();

            if (copy_to_user(arg, &sr, sizeof(sr)))
                return -EFAULT;
            return 0;
        }
        rcu_read_unlock();
        return -EADDRNOTAVAIL;
    default:
        return -ENOIOCTLCMD;
    }
}
#endif
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
    struct net_device *dev = netdev_notifier_info_to_dev(ptr);
    struct net *net = dev_net(dev);
    struct mr_table *mrt;
    struct vif_device *v;
    int ct;

    if (event != NETDEV_UNREGISTER)
        return NOTIFY_DONE;

    ipmr_for_each_table(mrt, net) {
        v = &mrt->vif_table[0];
        for (ct = 0; ct < mrt->maxvif; ct++, v++) {
            if (v->dev == dev)
                vif_delete(mrt, ct, 1, NULL);
        }
    }
    return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
    .notifier_call = ipmr_device_event,
};
/*
 *  Encapsulate a packet by attaching a valid IPIP header to it.
 *  This avoids tunnel drivers and other mess and gives us the speed so
 *  important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
    struct iphdr *iph;
    const struct iphdr *old_iph = ip_hdr(skb);

    skb_push(skb, sizeof(struct iphdr));
    skb->transport_header = skb->network_header;
    skb_reset_network_header(skb);
    iph = ip_hdr(skb);

    iph->version  = 4;
    iph->tos      = old_iph->tos;
    iph->ttl      = old_iph->ttl;
    iph->frag_off = 0;
    iph->daddr    = daddr;
    iph->saddr    = saddr;
    iph->protocol = IPPROTO_IPIP;
    iph->ihl      = 5;
    iph->tot_len  = htons(skb->len);
    ip_select_ident(skb, skb_dst(skb), NULL);
    ip_send_check(iph);

    memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
    nf_reset(skb);
}
static inline int ipmr_forward_finish(struct sk_buff *skb)
{
    struct ip_options *opt = &(IPCB(skb)->opt);

    IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
    IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);

    if (unlikely(opt->optlen))
        ip_forward_options(skb);

    return dst_output(skb);
}
/*
 *  Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
                            struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
    const struct iphdr *iph = ip_hdr(skb);
    struct vif_device *vif = &mrt->vif_table[vifi];
    struct net_device *dev;
    struct rtable *rt;
    struct flowi4 fl4;
    int encap = 0;

    if (vif->dev == NULL)
        goto out_free;

#ifdef CONFIG_IP_PIMSM
    if (vif->flags & VIFF_REGISTER) {
        vif->pkt_out++;
        vif->bytes_out += skb->len;
        vif->dev->stats.tx_bytes += skb->len;
        vif->dev->stats.tx_packets++;
        ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
        goto out_free;
    }
#endif

    if (vif->flags & VIFF_TUNNEL) {
        rt = ip_route_output_ports(net, &fl4, NULL,
                                   vif->remote, vif->local,
                                   0, 0,
                                   IPPROTO_IPIP,
                                   RT_TOS(iph->tos), vif->link);
        if (IS_ERR(rt))
            goto out_free;
        encap = sizeof(struct iphdr);
    } else {
        rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
                                   0, 0,
                                   IPPROTO_IPIP,
                                   RT_TOS(iph->tos), vif->link);
        if (IS_ERR(rt))
            goto out_free;
    }

    dev = rt->dst.dev;

    if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
        /* Do not fragment multicasts. Alas, IPv4 does not
         * allow to send ICMP, so that packets will disappear
         * to blackhole.
         */

        IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
        ip_rt_put(rt);
        goto out_free;
    }

    encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

    if (skb_cow(skb, encap)) {
        ip_rt_put(rt);
        goto out_free;
    }

    vif->pkt_out++;
    vif->bytes_out += skb->len;

    skb_dst_drop(skb);
    skb_dst_set(skb, &rt->dst);
    ip_decrease_ttl(ip_hdr(skb));

    /* FIXME: forward and output firewalls used to be called here.
     * What do we do with netfilter? -- RR
     */
    if (vif->flags & VIFF_TUNNEL) {
        ip_encap(skb, vif->local, vif->remote);
        /* FIXME: extra output firewall step used to be here. --RR */
        vif->dev->stats.tx_packets++;
        vif->dev->stats.tx_bytes += skb->len;
    }

    IPCB(skb)->flags |= IPSKB_FORWARDED;

    /*
     * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
     * not only before forwarding, but after forwarding on all output
     * interfaces. It is clear, if mrouter runs a multicasting
     * program, it should receive packets not depending to what interface
     * program is joined.
     * If we will not make it, the program will have to join on all
     * interfaces. On the other hand, multihoming host (or router, but
     * not mrouter) cannot join to more than one interface - it will
     * result in receiving multiple packets.
     */
    NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev,
            ipmr_forward_finish);
    return;

out_free:
    kfree_skb(skb);
}
static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
    int ct;

    for (ct = mrt->maxvif-1; ct >= 0; ct--) {
        if (mrt->vif_table[ct].dev == dev)
            return ct;
    }
    return -1;
}
1800 static void ip_mr_forward(struct net
*net
, struct mr_table
*mrt
,
1801 struct sk_buff
*skb
, struct mfc_cache
*cache
,
1806 int true_vifi
= ipmr_find_vif(mrt
, skb
->dev
);
1808 vif
= cache
->mfc_parent
;
1809 cache
->mfc_un
.res
.pkt
++;
1810 cache
->mfc_un
.res
.bytes
+= skb
->len
;
1812 if (cache
->mfc_origin
== htonl(INADDR_ANY
) && true_vifi
>= 0) {
1813 struct mfc_cache
*cache_proxy
;
1815 /* For an (*,G) entry, we only check that the incomming
1816 * interface is part of the static tree.
1818 cache_proxy
= ipmr_cache_find_any_parent(mrt
, vif
);
1820 cache_proxy
->mfc_un
.res
.ttls
[true_vifi
] < 255)
1825 * Wrong interface: drop packet and (maybe) send PIM assert.
1827 if (mrt
->vif_table
[vif
].dev
!= skb
->dev
) {
1828 if (rt_is_output_route(skb_rtable(skb
))) {
1829 /* It is our own packet, looped back.
1830 * Very complicated situation...
1832 * The best workaround until routing daemons will be
1833 * fixed is not to redistribute packet, if it was
1834 * send through wrong interface. It means, that
1835 * multicast applications WILL NOT work for
1836 * (S,G), which have default multicast route pointing
1837 * to wrong oif. In any case, it is not a good
1838 * idea to use multicasting applications on router.
1843 cache
->mfc_un
.res
.wrong_if
++;
1845 if (true_vifi
>= 0 && mrt
->mroute_do_assert
&&
1846 /* pimsm uses asserts, when switching from RPT to SPT,
1847 * so that we cannot check that packet arrived on an oif.
1848 * It is bad, but otherwise we would need to move pretty
1849 * large chunk of pimd to kernel. Ough... --ANK
1851 (mrt
->mroute_do_pim
||
1852 cache
->mfc_un
.res
.ttls
[true_vifi
] < 255) &&
1854 cache
->mfc_un
.res
.last_assert
+ MFC_ASSERT_THRESH
)) {
1855 cache
->mfc_un
.res
.last_assert
= jiffies
;
1856 ipmr_cache_report(mrt
, skb
, true_vifi
, IGMPMSG_WRONGVIF
);
1862 mrt
->vif_table
[vif
].pkt_in
++;
1863 mrt
->vif_table
[vif
].bytes_in
+= skb
->len
;
1868 if (cache
->mfc_origin
== htonl(INADDR_ANY
) &&
1869 cache
->mfc_mcastgrp
== htonl(INADDR_ANY
)) {
1870 if (true_vifi
>= 0 &&
1871 true_vifi
!= cache
->mfc_parent
&&
1873 cache
->mfc_un
.res
.ttls
[cache
->mfc_parent
]) {
1874 /* It's an (*,*) entry and the packet is not coming from
1875 * the upstream: forward the packet to the upstream
1878 psend
= cache
->mfc_parent
;
1883 for (ct
= cache
->mfc_un
.res
.maxvif
- 1;
1884 ct
>= cache
->mfc_un
.res
.minvif
; ct
--) {
1885 /* For (*,G) entry, don't forward to the incoming interface */
1886 if ((cache
->mfc_origin
!= htonl(INADDR_ANY
) ||
1888 ip_hdr(skb
)->ttl
> cache
->mfc_un
.res
.ttls
[ct
]) {
1890 struct sk_buff
*skb2
= skb_clone(skb
, GFP_ATOMIC
);
1893 ipmr_queue_xmit(net
, mrt
, skb2
, cache
,
1902 struct sk_buff
*skb2
= skb_clone(skb
, GFP_ATOMIC
);
1905 ipmr_queue_xmit(net
, mrt
, skb2
, cache
, psend
);
1907 ipmr_queue_xmit(net
, mrt
, skb
, cache
, psend
);
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
    struct rtable *rt = skb_rtable(skb);
    struct iphdr *iph = ip_hdr(skb);
    struct flowi4 fl4 = {
        .daddr       = iph->daddr,
        .saddr       = iph->saddr,
        .flowi4_tos  = RT_TOS(iph->tos),
        .flowi4_oif  = (rt_is_output_route(rt) ?
                        skb->dev->ifindex : 0),
        .flowi4_iif  = (rt_is_output_route(rt) ?
                        LOOPBACK_IFINDEX :
                        skb->dev->ifindex),
        .flowi4_mark = skb->mark,
    };
    struct mr_table *mrt;
    int err;

    err = ipmr_fib_lookup(net, &fl4, &mrt);
    if (err)
        return ERR_PTR(err);
    return mrt;
}
/*
 *  Multicast packets for forwarding arrive here
 *  Called with rcu_read_lock();
 */

int ip_mr_input(struct sk_buff *skb)
{
    struct mfc_cache *cache;
    struct net *net = dev_net(skb->dev);
    int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
    struct mr_table *mrt;

    /* Packet is looped back after forward, it should not be
     * forwarded second time, but still can be delivered locally.
     */
    if (IPCB(skb)->flags & IPSKB_FORWARDED)
        goto dont_forward;

    mrt = ipmr_rt_fib_lookup(net, skb);
    if (IS_ERR(mrt)) {
        kfree_skb(skb);
        return PTR_ERR(mrt);
    }
    if (!local) {
        if (IPCB(skb)->opt.router_alert) {
            if (ip_call_ra_chain(skb))
                return 0;
        } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
            /* IGMPv1 (and broken IGMPv2 implementations sort of
             * Cisco IOS <= 11.2(8)) do not put router alert
             * option to IGMP packets destined to routable
             * groups. It is very bad, because it means
             * that we can forward NO IGMP messages.
             */
            struct sock *mroute_sk;

            mroute_sk = rcu_dereference(mrt->mroute_sk);
            if (mroute_sk) {
                nf_reset(skb);
                raw_rcv(mroute_sk, skb);
                return 0;
            }
        }
    }

    /* already under rcu_read_lock() */
    cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
    if (cache == NULL) {
        int vif = ipmr_find_vif(mrt, skb->dev);

        if (vif >= 0)
            cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
                                        vif);
    }

    /*
     *  No usable cache entry
     */
    if (cache == NULL) {
        int vif;

        if (local) {
            struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
            ip_local_deliver(skb);
            if (skb2 == NULL)
                return -ENOBUFS;
            skb = skb2;
        }

        read_lock(&mrt_lock);
        vif = ipmr_find_vif(mrt, skb->dev);
        if (vif >= 0) {
            int err2 = ipmr_cache_unresolved(mrt, vif, skb);
            read_unlock(&mrt_lock);

            return err2;
        }
        read_unlock(&mrt_lock);
        kfree_skb(skb);
        return -ENODEV;
    }

    read_lock(&mrt_lock);
    ip_mr_forward(net, mrt, skb, cache, local);
    read_unlock(&mrt_lock);

    if (local)
        return ip_local_deliver(skb);

    return 0;

dont_forward:
    if (local)
        return ip_local_deliver(skb);
    kfree_skb(skb);
    return 0;
}
#ifdef CONFIG_IP_PIMSM
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
                     unsigned int pimlen)
{
    struct net_device *reg_dev = NULL;
    struct iphdr *encap;

    encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
    /*
     * Check that:
     * a. packet is really sent to a multicast group
     * b. packet is not a NULL-REGISTER
     * c. packet is not truncated
     */
    if (!ipv4_is_multicast(encap->daddr) ||
        encap->tot_len == 0 ||
        ntohs(encap->tot_len) + pimlen > skb->len)
        return 1;

    read_lock(&mrt_lock);
    if (mrt->mroute_reg_vif_num >= 0)
        reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
    read_unlock(&mrt_lock);

    if (reg_dev == NULL)
        return 1;

    skb->mac_header = skb->network_header;
    skb_pull(skb, (u8 *)encap - skb->data);
    skb_reset_network_header(skb);
    skb->protocol = htons(ETH_P_IP);
    skb->ip_summed = CHECKSUM_NONE;

    skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

    netif_rx(skb);

    return NET_RX_SUCCESS;
}
#endif
#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff *skb)
{
    struct igmphdr *pim;
    struct net *net = dev_net(skb->dev);
    struct mr_table *mrt;

    if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
        goto drop;

    pim = igmp_hdr(skb);

    mrt = ipmr_rt_fib_lookup(net, skb);
    if (IS_ERR(mrt))
        goto drop;
    if (!mrt->mroute_do_pim ||
        pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
        goto drop;

    if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
        kfree_skb(skb);
    }
    return 0;
}
#endif
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
    struct pimreghdr *pim;
    struct net *net = dev_net(skb->dev);
    struct mr_table *mrt;

    if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
        goto drop;

    pim = (struct pimreghdr *)skb_transport_header(skb);
    if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
        (pim->flags & PIM_NULL_REGISTER) ||
        (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
         csum_fold(skb_checksum(skb, 0, skb->len, 0))))
        goto drop;

    mrt = ipmr_rt_fib_lookup(net, skb);
    if (IS_ERR(mrt))
        goto drop;
    if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
        kfree_skb(skb);
    }
    return 0;
}
#endif
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                              struct mfc_cache *c, struct rtmsg *rtm)
{
    int ct;
    struct rtnexthop *nhp;
    struct nlattr *mp_attr;
    struct rta_mfc_stats mfcs;

    /* If cache is unresolved, don't try to parse IIF and OIF */
    if (c->mfc_parent >= MAXVIFS)
        return -ENOENT;

    if (VIF_EXISTS(mrt, c->mfc_parent) &&
        nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
        return -EMSGSIZE;

    if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
        return -EMSGSIZE;

    for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
        if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
            if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
                nla_nest_cancel(skb, mp_attr);
                return -EMSGSIZE;
            }

            nhp->rtnh_flags = 0;
            nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
            nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
            nhp->rtnh_len = sizeof(*nhp);
        }
    }

    nla_nest_end(skb, mp_attr);

    mfcs.mfcs_packets = c->mfc_un.res.pkt;
    mfcs.mfcs_bytes = c->mfc_un.res.bytes;
    mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
    if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
        return -EMSGSIZE;

    rtm->rtm_type = RTN_MULTICAST;
    return 1;
}
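/* The oif list is encoded with the same RTA_MULTIPATH/rtnexthop format that
 * unicast multipath routes use; the TTL threshold of each VIF travels in
 * the rtnh_hops field.
 */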
int ipmr_get_route(struct net *net, struct sk_buff *skb,
                   __be32 saddr, __be32 daddr,
                   struct rtmsg *rtm, int nowait)
{
    struct mfc_cache *cache;
    struct mr_table *mrt;
    int err;

    mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
    if (mrt == NULL)
        return -ENOENT;

    rcu_read_lock();
    cache = ipmr_cache_find(mrt, saddr, daddr);
    if (cache == NULL && skb->dev) {
        int vif = ipmr_find_vif(mrt, skb->dev);

        if (vif >= 0)
            cache = ipmr_cache_find_any(mrt, daddr, vif);
    }
    if (cache == NULL) {
        struct sk_buff *skb2;
        struct iphdr *iph;
        struct net_device *dev;
        int vif = -1;

        if (nowait) {
            rcu_read_unlock();
            return -EAGAIN;
        }

        dev = skb->dev;
        read_lock(&mrt_lock);
        if (dev)
            vif = ipmr_find_vif(mrt, dev);
        if (vif < 0) {
            read_unlock(&mrt_lock);
            rcu_read_unlock();
            return -ENODEV;
        }
        skb2 = skb_clone(skb, GFP_ATOMIC);
        if (!skb2) {
            read_unlock(&mrt_lock);
            rcu_read_unlock();
            return -ENOMEM;
        }

        skb_push(skb2, sizeof(struct iphdr));
        skb_reset_network_header(skb2);
        iph = ip_hdr(skb2);
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->saddr = saddr;
        iph->daddr = daddr;
        iph->version = 0;
        err = ipmr_cache_unresolved(mrt, vif, skb2);
        read_unlock(&mrt_lock);
        rcu_read_unlock();
        return err;
    }

    read_lock(&mrt_lock);
    if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
        cache->mfc_flags |= MFC_NOTIFY;
    err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
    read_unlock(&mrt_lock);
    rcu_read_unlock();
    return err;
}
*mrt
, struct sk_buff
*skb
,
2255 u32 portid
, u32 seq
, struct mfc_cache
*c
, int cmd
)
2257 struct nlmsghdr
*nlh
;
2261 nlh
= nlmsg_put(skb
, portid
, seq
, cmd
, sizeof(*rtm
), NLM_F_MULTI
);
2265 rtm
= nlmsg_data(nlh
);
2266 rtm
->rtm_family
= RTNL_FAMILY_IPMR
;
2267 rtm
->rtm_dst_len
= 32;
2268 rtm
->rtm_src_len
= 32;
2270 rtm
->rtm_table
= mrt
->id
;
2271 if (nla_put_u32(skb
, RTA_TABLE
, mrt
->id
))
2272 goto nla_put_failure
;
2273 rtm
->rtm_type
= RTN_MULTICAST
;
2274 rtm
->rtm_scope
= RT_SCOPE_UNIVERSE
;
2275 if (c
->mfc_flags
& MFC_STATIC
)
2276 rtm
->rtm_protocol
= RTPROT_STATIC
;
2278 rtm
->rtm_protocol
= RTPROT_MROUTED
;
2281 if (nla_put_be32(skb
, RTA_SRC
, c
->mfc_origin
) ||
2282 nla_put_be32(skb
, RTA_DST
, c
->mfc_mcastgrp
))
2283 goto nla_put_failure
;
2284 err
= __ipmr_fill_mroute(mrt
, skb
, c
, rtm
);
2285 /* do not break the dump if cache is unresolved */
2286 if (err
< 0 && err
!= -ENOENT
)
2287 goto nla_put_failure
;
2289 return nlmsg_end(skb
, nlh
);
2292 nlmsg_cancel(skb
, nlh
);
static size_t mroute_msgsize(bool unresolved, int maxvif)
{
    size_t len =
        NLMSG_ALIGN(sizeof(struct rtmsg))
        + nla_total_size(4)                     /* RTA_TABLE */
        + nla_total_size(4)                     /* RTA_SRC */
        + nla_total_size(4)                     /* RTA_DST */
        ;

    if (!unresolved)
        len = len
              + nla_total_size(4)               /* RTA_IIF */
              + nla_total_size(0)               /* RTA_MULTIPATH */
              + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
                                                /* RTA_MFC_STATS */
              + nla_total_size(sizeof(struct rta_mfc_stats))
        ;

    return len;
}
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
                                 int cmd)
{
    struct net *net = read_pnet(&mrt->net);
    struct sk_buff *skb;
    int err = -ENOBUFS;

    skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
                    GFP_ATOMIC);
    if (skb == NULL)
        goto errout;

    err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
    if (err < 0)
        goto errout;

    rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
    return;

errout:
    kfree_skb(skb);
    if (err < 0)
        rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
    struct net *net = sock_net(skb->sk);
    struct mr_table *mrt;
    struct mfc_cache *mfc;
    unsigned int t = 0, s_t;
    unsigned int h = 0, s_h;
    unsigned int e = 0, s_e;

    s_t = cb->args[0];
    s_h = cb->args[1];
    s_e = cb->args[2];

    rcu_read_lock();
    ipmr_for_each_table(mrt, net) {
        if (t < s_t)
            goto next_table;
        if (t > s_t)
            s_h = 0;
        for (h = s_h; h < MFC_LINES; h++) {
            list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
                if (e < s_e)
                    goto next_entry;
                if (ipmr_fill_mroute(mrt, skb,
                                     NETLINK_CB(cb->skb).portid,
                                     cb->nlh->nlmsg_seq,
                                     mfc, RTM_NEWROUTE) < 0)
                    goto done;
next_entry:
                e++;
            }
            e = s_e = 0;
        }
        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
            if (e < s_e)
                goto next_entry2;
            if (ipmr_fill_mroute(mrt, skb,
                                 NETLINK_CB(cb->skb).portid,
                                 cb->nlh->nlmsg_seq,
                                 mfc, RTM_NEWROUTE) < 0) {
                spin_unlock_bh(&mfc_unres_lock);
                goto done;
            }
next_entry2:
            e++;
        }
        spin_unlock_bh(&mfc_unres_lock);
        e = s_e = 0;
        s_h = 0;
next_table:
        t++;
    }
done:
    rcu_read_unlock();

    cb->args[2] = e;
    cb->args[1] = h;
    cb->args[0] = t;

    return skb->len;
}
#ifdef CONFIG_PROC_FS
/*
 *  The /proc interfaces to multicast routing :
 *  /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
    struct seq_net_private p;
    struct mr_table *mrt;
    int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
                                           struct ipmr_vif_iter *iter,
                                           loff_t pos)
{
    struct mr_table *mrt = iter->mrt;

    for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
        if (!VIF_EXISTS(mrt, iter->ct))
            continue;
        if (pos-- == 0)
            return &mrt->vif_table[iter->ct];
    }
    return NULL;
}
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
    __acquires(mrt_lock)
{
    struct ipmr_vif_iter *iter = seq->private;
    struct net *net = seq_file_net(seq);
    struct mr_table *mrt;

    mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
    if (mrt == NULL)
        return ERR_PTR(-ENOENT);

    iter->mrt = mrt;

    read_lock(&mrt_lock);
    return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
        : SEQ_START_TOKEN;
}
static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    struct ipmr_vif_iter *iter = seq->private;
    struct net *net = seq_file_net(seq);
    struct mr_table *mrt = iter->mrt;

    ++*pos;
    if (v == SEQ_START_TOKEN)
        return ipmr_vif_seq_idx(net, iter, 0);

    while (++iter->ct < mrt->maxvif) {
        if (!VIF_EXISTS(mrt, iter->ct))
            continue;
        return &mrt->vif_table[iter->ct];
    }
    return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
    __releases(mrt_lock)
{
    read_unlock(&mrt_lock);
}
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
    struct ipmr_vif_iter *iter = seq->private;
    struct mr_table *mrt = iter->mrt;

    if (v == SEQ_START_TOKEN) {
        seq_puts(seq,
                 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
    } else {
        const struct vif_device *vif = v;
        const char *name = vif->dev ? vif->dev->name : "none";

        seq_printf(seq,
                   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
                   vif - mrt->vif_table,
                   name, vif->bytes_in, vif->pkt_in,
                   vif->bytes_out, vif->pkt_out,
                   vif->flags, vif->local, vif->remote);
    }
    return 0;
}
static const struct seq_operations ipmr_vif_seq_ops = {
    .start = ipmr_vif_seq_start,
    .next  = ipmr_vif_seq_next,
    .stop  = ipmr_vif_seq_stop,
    .show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
    return seq_open_net(inode, file, &ipmr_vif_seq_ops,
                        sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
    .owner   = THIS_MODULE,
    .open    = ipmr_vif_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = seq_release_net,
};
struct ipmr_mfc_iter {
    struct seq_net_private p;
    struct mr_table *mrt;
    struct list_head *cache;
    int ct;
};


static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
                                          struct ipmr_mfc_iter *it, loff_t pos)
{
    struct mr_table *mrt = it->mrt;
    struct mfc_cache *mfc;

    rcu_read_lock();
    for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
        it->cache = &mrt->mfc_cache_array[it->ct];
        list_for_each_entry_rcu(mfc, it->cache, list)
            if (pos-- == 0)
                return mfc;
    }
    rcu_read_unlock();

    spin_lock_bh(&mfc_unres_lock);
    it->cache = &mrt->mfc_unres_queue;
    list_for_each_entry(mfc, it->cache, list)
        if (pos-- == 0)
            return mfc;
    spin_unlock_bh(&mfc_unres_lock);

    it->cache = NULL;
    return NULL;
}
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
    struct ipmr_mfc_iter *it = seq->private;
    struct net *net = seq_file_net(seq);
    struct mr_table *mrt;

    mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
    if (mrt == NULL)
        return ERR_PTR(-ENOENT);

    it->mrt = mrt;
    it->cache = NULL;
    return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
        : SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    struct mfc_cache *mfc = v;
    struct ipmr_mfc_iter *it = seq->private;
    struct net *net = seq_file_net(seq);
    struct mr_table *mrt = it->mrt;

    ++*pos;

    if (v == SEQ_START_TOKEN)
        return ipmr_mfc_seq_idx(net, seq->private, 0);

    if (mfc->list.next != it->cache)
        return list_entry(mfc->list.next, struct mfc_cache, list);

    if (it->cache == &mrt->mfc_unres_queue)
        goto end_of_list;

    BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);

    while (++it->ct < MFC_LINES) {
        it->cache = &mrt->mfc_cache_array[it->ct];
        if (list_empty(it->cache))
            continue;
        return list_first_entry(it->cache, struct mfc_cache, list);
    }

    /* exhausted cache_array, show unresolved */
    rcu_read_unlock();
    it->cache = &mrt->mfc_unres_queue;

    spin_lock_bh(&mfc_unres_lock);
    if (!list_empty(it->cache))
        return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
    spin_unlock_bh(&mfc_unres_lock);
    it->cache = NULL;

    return NULL;
}
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
    struct ipmr_mfc_iter *it = seq->private;
    struct mr_table *mrt = it->mrt;

    if (it->cache == &mrt->mfc_unres_queue)
        spin_unlock_bh(&mfc_unres_lock);
    else if (it->cache == &mrt->mfc_cache_array[it->ct])
        rcu_read_unlock();
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
    int n;

    if (v == SEQ_START_TOKEN) {
        seq_puts(seq,
                 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
    } else {
        const struct mfc_cache *mfc = v;
        const struct ipmr_mfc_iter *it = seq->private;
        const struct mr_table *mrt = it->mrt;

        seq_printf(seq, "%08X %08X %-3hd",
                   (__force u32) mfc->mfc_mcastgrp,
                   (__force u32) mfc->mfc_origin,
                   mfc->mfc_parent);

        if (it->cache != &mrt->mfc_unres_queue) {
            seq_printf(seq, " %8lu %8lu %8lu",
                       mfc->mfc_un.res.pkt,
                       mfc->mfc_un.res.bytes,
                       mfc->mfc_un.res.wrong_if);
            for (n = mfc->mfc_un.res.minvif;
                 n < mfc->mfc_un.res.maxvif; n++) {
                if (VIF_EXISTS(mrt, n) &&
                    mfc->mfc_un.res.ttls[n] < 255)
                    seq_printf(seq,
                               " %2d:%-3d",
                               n, mfc->mfc_un.res.ttls[n]);
            }
        } else {
            /* unresolved mfc_caches don't contain
             * pkt, bytes and wrong_if values
             */
            seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
        }
        seq_putc(seq, '\n');
    }
    return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
    .start = ipmr_mfc_seq_start,
    .next  = ipmr_mfc_seq_next,
    .stop  = ipmr_mfc_seq_stop,
    .show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
    return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
                        sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
    .owner   = THIS_MODULE,
    .open    = ipmr_mfc_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = seq_release_net,
};
#endif
#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
    .handler = pim_rcv,
    .netns_ok = 1,
};
#endif
/*
 *  Setup for IP multicast routing
 */
static int __net_init ipmr_net_init(struct net *net)
{
    int err;

    err = ipmr_rules_init(net);
    if (err < 0)
        goto fail;

#ifdef CONFIG_PROC_FS
    err = -ENOMEM;
    if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
        goto proc_vif_fail;
    if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
        goto proc_cache_fail;
#endif
    return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
    remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
    ipmr_rules_exit(net);
#endif
fail:
    return err;
}
static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
    remove_proc_entry("ip_mr_cache", net->proc_net);
    remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
    ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
    .init = ipmr_net_init,
    .exit = ipmr_net_exit,
};
int __init ip_mr_init(void)
{
    int err;

    mrt_cachep = kmem_cache_create("ip_mrt_cache",
                                   sizeof(struct mfc_cache),
                                   0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
                                   NULL);
    if (!mrt_cachep)
        return -ENOMEM;

    err = register_pernet_subsys(&ipmr_net_ops);
    if (err)
        goto reg_pernet_fail;

    err = register_netdevice_notifier(&ip_mr_notifier);
    if (err)
        goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
    if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
        pr_err("%s: can't add PIM protocol\n", __func__);
        err = -EAGAIN;
        goto add_proto_fail;
    }
#endif
    rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
                  NULL, ipmr_rtm_dumproute, NULL);
    return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
    unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
    unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
    kmem_cache_destroy(mrt_cachep);
    return err;
}