net: ipmr: fix setsockopt error return
[deliverable/linux.git] / net / ipv4 / ipmr.c
/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */

#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>

struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ipmr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	ipmr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
	rtnl_unlock();
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv4.mrt = mrt;
	return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	rtnl_lock();
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
	rtnl_unlock();
}
#endif

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;
	unsigned int i;

	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
		return ERR_PTR(-EINVAL);

	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	write_pnet(&mrt->net, net);
	mrt->id = id;

	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

	mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}

static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt);
	kfree(mrt);
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return false;
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	return true;
}

static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;
			if (!ipmr_init_vif_indev(dev))
				goto failure;
			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink	= reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (!ipmr_init_vif_indev(dev))
		goto failure;
	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}

/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif

2c53040f
BH
573/**
574 * vif_delete - Delete a VIF entry
7dc00c82 575 * @notify: Set to 1, if the caller is a notifier_call
1da177e4 576 */
0c12295a 577static int vif_delete(struct mr_table *mrt, int vifi, int notify,
d17fa6fa 578 struct list_head *head)
1da177e4
LT
579{
580 struct vif_device *v;
581 struct net_device *dev;
582 struct in_device *in_dev;
583
0c12295a 584 if (vifi < 0 || vifi >= mrt->maxvif)
1da177e4
LT
585 return -EADDRNOTAVAIL;
586
0c12295a 587 v = &mrt->vif_table[vifi];
1da177e4
LT
588
589 write_lock_bh(&mrt_lock);
590 dev = v->dev;
591 v->dev = NULL;
592
593 if (!dev) {
594 write_unlock_bh(&mrt_lock);
595 return -EADDRNOTAVAIL;
596 }
597
0c12295a
PM
598 if (vifi == mrt->mroute_reg_vif_num)
599 mrt->mroute_reg_vif_num = -1;
1da177e4 600
a8cb16dd 601 if (vifi + 1 == mrt->maxvif) {
1da177e4 602 int tmp;
a8cb16dd
ED
603
604 for (tmp = vifi - 1; tmp >= 0; tmp--) {
0c12295a 605 if (VIF_EXISTS(mrt, tmp))
1da177e4
LT
606 break;
607 }
0c12295a 608 mrt->maxvif = tmp+1;
1da177e4
LT
609 }
610
611 write_unlock_bh(&mrt_lock);
612
613 dev_set_allmulti(dev, -1);
614
a8cb16dd
ED
615 in_dev = __in_dev_get_rtnl(dev);
616 if (in_dev) {
42f811b8 617 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
d67b8c61
ND
618 inet_netconf_notify_devconf(dev_net(dev),
619 NETCONFA_MC_FORWARDING,
620 dev->ifindex, &in_dev->cnf);
1da177e4
LT
621 ip_rt_multicast_event(in_dev);
622 }
623
a8cb16dd 624 if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
d17fa6fa 625 unregister_netdevice_queue(dev, head);
1da177e4
LT
626
627 dev_put(dev);
628 return 0;
629}
630
a8c9486b 631static void ipmr_cache_free_rcu(struct rcu_head *head)
5c0a66f5 632{
a8c9486b
ED
633 struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);
634
5c0a66f5
BT
635 kmem_cache_free(mrt_cachep, c);
636}
637
a8c9486b
ED
638static inline void ipmr_cache_free(struct mfc_cache *c)
639{
640 call_rcu(&c->rcu, ipmr_cache_free_rcu);
641}
642
1da177e4 643/* Destroy an unresolved cache entry, killing queued skbs
a8cb16dd 644 * and reporting error to netlink readers.
1da177e4 645 */
0c12295a 646static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
1da177e4 647{
8de53dfb 648 struct net *net = read_pnet(&mrt->net);
1da177e4 649 struct sk_buff *skb;
9ef1d4c7 650 struct nlmsgerr *e;
1da177e4 651
0c12295a 652 atomic_dec(&mrt->cache_resolve_queue_len);
1da177e4 653
c354e124 654 while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
eddc9ec5 655 if (ip_hdr(skb)->version == 0) {
1da177e4
LT
656 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
657 nlh->nlmsg_type = NLMSG_ERROR;
573ce260 658 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1da177e4 659 skb_trim(skb, nlh->nlmsg_len);
573ce260 660 e = nlmsg_data(nlh);
9ef1d4c7
PM
661 e->error = -ETIMEDOUT;
662 memset(&e->msg, 0, sizeof(e->msg));
2942e900 663
15e47304 664 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
a8cb16dd 665 } else {
1da177e4 666 kfree_skb(skb);
a8cb16dd 667 }
1da177e4
LT
668 }
669
5c0a66f5 670 ipmr_cache_free(c);
1da177e4
LT
671}
672
e258beb2 673/* Timer process for the unresolved queue. */
e258beb2 674static void ipmr_expire_process(unsigned long arg)
1da177e4 675{
0c12295a 676 struct mr_table *mrt = (struct mr_table *)arg;
1da177e4
LT
677 unsigned long now;
678 unsigned long expires;
862465f2 679 struct mfc_cache *c, *next;
1da177e4
LT
680
681 if (!spin_trylock(&mfc_unres_lock)) {
0c12295a 682 mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
1da177e4
LT
683 return;
684 }
685
0c12295a 686 if (list_empty(&mrt->mfc_unres_queue))
1da177e4
LT
687 goto out;
688
689 now = jiffies;
690 expires = 10*HZ;
1da177e4 691
0c12295a 692 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
1da177e4
LT
693 if (time_after(c->mfc_un.unres.expires, now)) {
694 unsigned long interval = c->mfc_un.unres.expires - now;
695 if (interval < expires)
696 expires = interval;
1da177e4
LT
697 continue;
698 }
699
862465f2 700 list_del(&c->list);
8cd3ac9f 701 mroute_netlink_event(mrt, c, RTM_DELROUTE);
0c12295a 702 ipmr_destroy_unres(mrt, c);
1da177e4
LT
703 }
704
0c12295a
PM
705 if (!list_empty(&mrt->mfc_unres_queue))
706 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
1da177e4
LT
707
708out:
709 spin_unlock(&mfc_unres_lock);
710}
711
712/* Fill oifs list. It is called under write locked mrt_lock. */
0c12295a 713static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
d658f8a0 714 unsigned char *ttls)
1da177e4
LT
715{
716 int vifi;
717
718 cache->mfc_un.res.minvif = MAXVIFS;
719 cache->mfc_un.res.maxvif = 0;
720 memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
721
0c12295a
PM
722 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
723 if (VIF_EXISTS(mrt, vifi) &&
cf958ae3 724 ttls[vifi] && ttls[vifi] < 255) {
1da177e4
LT
725 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
726 if (cache->mfc_un.res.minvif > vifi)
727 cache->mfc_un.res.minvif = vifi;
728 if (cache->mfc_un.res.maxvif <= vifi)
729 cache->mfc_un.res.maxvif = vifi + 1;
730 }
731 }
732}
733
0c12295a
PM
734static int vif_add(struct net *net, struct mr_table *mrt,
735 struct vifctl *vifc, int mrtsock)
1da177e4
LT
736{
737 int vifi = vifc->vifc_vifi;
0c12295a 738 struct vif_device *v = &mrt->vif_table[vifi];
1da177e4
LT
739 struct net_device *dev;
740 struct in_device *in_dev;
d607032d 741 int err;
1da177e4
LT
742
743 /* Is vif busy ? */
0c12295a 744 if (VIF_EXISTS(mrt, vifi))
1da177e4
LT
745 return -EADDRINUSE;
746
747 switch (vifc->vifc_flags) {
1da177e4 748 case VIFF_REGISTER:
1973a4ea 749 if (!ipmr_pimsm_enabled())
c316c629
NA
750 return -EINVAL;
751 /* Special Purpose VIF in PIM
1da177e4
LT
752 * All the packets will be sent to the daemon
753 */
0c12295a 754 if (mrt->mroute_reg_vif_num >= 0)
1da177e4 755 return -EADDRINUSE;
f0ad0860 756 dev = ipmr_reg_vif(net, mrt);
1da177e4
LT
757 if (!dev)
758 return -ENOBUFS;
d607032d
WC
759 err = dev_set_allmulti(dev, 1);
760 if (err) {
761 unregister_netdevice(dev);
7dc00c82 762 dev_put(dev);
d607032d
WC
763 return err;
764 }
1da177e4 765 break;
e905a9ed 766 case VIFF_TUNNEL:
4feb88e5 767 dev = ipmr_new_tunnel(net, vifc);
1da177e4
LT
768 if (!dev)
769 return -ENOBUFS;
d607032d
WC
770 err = dev_set_allmulti(dev, 1);
771 if (err) {
772 ipmr_del_tunnel(dev, vifc);
7dc00c82 773 dev_put(dev);
d607032d
WC
774 return err;
775 }
1da177e4 776 break;
ee5e81f0 777 case VIFF_USE_IFINDEX:
1da177e4 778 case 0:
ee5e81f0
I
779 if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
780 dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
51456b29 781 if (dev && !__in_dev_get_rtnl(dev)) {
ee5e81f0
I
782 dev_put(dev);
783 return -EADDRNOTAVAIL;
784 }
a8cb16dd 785 } else {
ee5e81f0 786 dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
a8cb16dd 787 }
1da177e4
LT
788 if (!dev)
789 return -EADDRNOTAVAIL;
d607032d 790 err = dev_set_allmulti(dev, 1);
7dc00c82
WC
791 if (err) {
792 dev_put(dev);
d607032d 793 return err;
7dc00c82 794 }
1da177e4
LT
795 break;
796 default:
797 return -EINVAL;
798 }
799
a8cb16dd
ED
800 in_dev = __in_dev_get_rtnl(dev);
801 if (!in_dev) {
d0490cfd 802 dev_put(dev);
1da177e4 803 return -EADDRNOTAVAIL;
d0490cfd 804 }
42f811b8 805 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
d67b8c61
ND
806 inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
807 &in_dev->cnf);
1da177e4
LT
808 ip_rt_multicast_event(in_dev);
809
a8cb16dd
ED
810 /* Fill in the VIF structures */
811
c354e124
JK
812 v->rate_limit = vifc->vifc_rate_limit;
813 v->local = vifc->vifc_lcl_addr.s_addr;
814 v->remote = vifc->vifc_rmt_addr.s_addr;
815 v->flags = vifc->vifc_flags;
1da177e4
LT
816 if (!mrtsock)
817 v->flags |= VIFF_STATIC;
c354e124 818 v->threshold = vifc->vifc_threshold;
1da177e4
LT
819 v->bytes_in = 0;
820 v->bytes_out = 0;
821 v->pkt_in = 0;
822 v->pkt_out = 0;
823 v->link = dev->ifindex;
a8cb16dd 824 if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
a54acb3a 825 v->link = dev_get_iflink(dev);
1da177e4
LT
826
827 /* And finish update writing critical data */
828 write_lock_bh(&mrt_lock);
c354e124 829 v->dev = dev;
a8cb16dd 830 if (v->flags & VIFF_REGISTER)
0c12295a 831 mrt->mroute_reg_vif_num = vifi;
0c12295a
PM
832 if (vifi+1 > mrt->maxvif)
833 mrt->maxvif = vifi+1;
1da177e4
LT
834 write_unlock_bh(&mrt_lock);
835 return 0;
836}
837
a8c9486b 838/* called with rcu_read_lock() */
0c12295a 839static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
4feb88e5
BT
840 __be32 origin,
841 __be32 mcastgrp)
1da177e4 842{
c354e124 843 int line = MFC_HASH(mcastgrp, origin);
1da177e4
LT
844 struct mfc_cache *c;
845
a8c9486b 846 list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
862465f2
PM
847 if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
848 return c;
1da177e4 849 }
862465f2 850 return NULL;
1da177e4
LT
851}
852
660b26dc
ND
853/* Look for a (*,*,oif) entry */
854static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
855 int vifi)
856{
360eb5da 857 int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
660b26dc
ND
858 struct mfc_cache *c;
859
860 list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
360eb5da
ND
861 if (c->mfc_origin == htonl(INADDR_ANY) &&
862 c->mfc_mcastgrp == htonl(INADDR_ANY) &&
660b26dc
ND
863 c->mfc_un.res.ttls[vifi] < 255)
864 return c;
865
866 return NULL;
867}
868
869/* Look for a (*,G) entry */
870static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
871 __be32 mcastgrp, int vifi)
872{
360eb5da 873 int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
660b26dc
ND
874 struct mfc_cache *c, *proxy;
875
360eb5da 876 if (mcastgrp == htonl(INADDR_ANY))
660b26dc
ND
877 goto skip;
878
879 list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
360eb5da 880 if (c->mfc_origin == htonl(INADDR_ANY) &&
660b26dc
ND
881 c->mfc_mcastgrp == mcastgrp) {
882 if (c->mfc_un.res.ttls[vifi] < 255)
883 return c;
884
885 /* It's ok if the vifi is part of the static tree */
886 proxy = ipmr_cache_find_any_parent(mrt,
887 c->mfc_parent);
888 if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
889 return c;
890 }
891
892skip:
893 return ipmr_cache_find_any_parent(mrt, vifi);
894}
895
7ef8f65d 896/* Allocate a multicast cache entry */
d658f8a0 897static struct mfc_cache *ipmr_cache_alloc(void)
1da177e4 898{
c354e124 899 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
a8c9486b
ED
900
901 if (c)
902 c->mfc_un.res.minvif = MAXVIFS;
1da177e4
LT
903 return c;
904}
905
d658f8a0 906static struct mfc_cache *ipmr_cache_alloc_unres(void)
1da177e4 907{
c354e124 908 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
a8c9486b
ED
909
910 if (c) {
911 skb_queue_head_init(&c->mfc_un.unres.unresolved);
912 c->mfc_un.unres.expires = jiffies + 10*HZ;
913 }
1da177e4
LT
914 return c;
915}
916
7ef8f65d 917/* A cache entry has gone into a resolved state from queued */
0c12295a
PM
918static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
919 struct mfc_cache *uc, struct mfc_cache *c)
1da177e4
LT
920{
921 struct sk_buff *skb;
9ef1d4c7 922 struct nlmsgerr *e;
1da177e4 923
a8cb16dd 924 /* Play the pending entries through our router */
c354e124 925 while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
eddc9ec5 926 if (ip_hdr(skb)->version == 0) {
1da177e4
LT
927 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
928
573ce260 929 if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
a8cb16dd
ED
930 nlh->nlmsg_len = skb_tail_pointer(skb) -
931 (u8 *)nlh;
1da177e4
LT
932 } else {
933 nlh->nlmsg_type = NLMSG_ERROR;
573ce260 934 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1da177e4 935 skb_trim(skb, nlh->nlmsg_len);
573ce260 936 e = nlmsg_data(nlh);
9ef1d4c7
PM
937 e->error = -EMSGSIZE;
938 memset(&e->msg, 0, sizeof(e->msg));
1da177e4 939 }
2942e900 940
15e47304 941 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
a8cb16dd 942 } else {
0c12295a 943 ip_mr_forward(net, mrt, skb, c, 0);
a8cb16dd 944 }
1da177e4
LT
945 }
946}
947
c316c629
NA
948/* Bounce a cache query up to mrouted. We could use netlink for this but mrouted
949 * expects the following bizarre scheme.
1da177e4 950 *
c316c629 951 * Called under mrt_lock.
1da177e4 952 */
0c12295a 953static int ipmr_cache_report(struct mr_table *mrt,
4feb88e5 954 struct sk_buff *pkt, vifi_t vifi, int assert)
1da177e4 955{
c9bdd4b5 956 const int ihl = ip_hdrlen(pkt);
c316c629 957 struct sock *mroute_sk;
1da177e4
LT
958 struct igmphdr *igmp;
959 struct igmpmsg *msg;
c316c629 960 struct sk_buff *skb;
1da177e4
LT
961 int ret;
962
1da177e4
LT
963 if (assert == IGMPMSG_WHOLEPKT)
964 skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
965 else
1da177e4
LT
966 skb = alloc_skb(128, GFP_ATOMIC);
967
132adf54 968 if (!skb)
1da177e4
LT
969 return -ENOBUFS;
970
1da177e4
LT
971 if (assert == IGMPMSG_WHOLEPKT) {
972 /* Ugly, but we have no choice with this interface.
a8cb16dd
ED
973 * Duplicate old header, fix ihl, length etc.
974 * And all this only to mangle msg->im_msgtype and
975 * to set msg->im_mbz to "mbz" :-)
1da177e4 976 */
878c8145
ACM
977 skb_push(skb, sizeof(struct iphdr));
978 skb_reset_network_header(skb);
badff6d0 979 skb_reset_transport_header(skb);
0272ffc4 980 msg = (struct igmpmsg *)skb_network_header(skb);
d56f90a7 981 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
1da177e4
LT
982 msg->im_msgtype = IGMPMSG_WHOLEPKT;
983 msg->im_mbz = 0;
0c12295a 984 msg->im_vif = mrt->mroute_reg_vif_num;
eddc9ec5
ACM
985 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
986 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
987 sizeof(struct iphdr));
c316c629
NA
988 } else {
989 /* Copy the IP header */
990 skb_set_network_header(skb, skb->len);
991 skb_put(skb, ihl);
992 skb_copy_to_linear_data(skb, pkt->data, ihl);
993 /* Flag to the kernel this is a route add */
994 ip_hdr(skb)->protocol = 0;
995 msg = (struct igmpmsg *)skb_network_header(skb);
996 msg->im_vif = vifi;
997 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
998 /* Add our header */
999 igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
1000 igmp->type = assert;
1001 msg->im_msgtype = assert;
1002 igmp->code = 0;
1003 ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
1004 skb->transport_header = skb->network_header;
e905a9ed 1005 }
1da177e4 1006
4c968709
ED
1007 rcu_read_lock();
1008 mroute_sk = rcu_dereference(mrt->mroute_sk);
51456b29 1009 if (!mroute_sk) {
4c968709 1010 rcu_read_unlock();
1da177e4
LT
1011 kfree_skb(skb);
1012 return -EINVAL;
1013 }
1014
a8cb16dd 1015 /* Deliver to mrouted */
4c968709
ED
1016 ret = sock_queue_rcv_skb(mroute_sk, skb);
1017 rcu_read_unlock();
70a269e6 1018 if (ret < 0) {
e87cc472 1019 net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
1da177e4
LT
1020 kfree_skb(skb);
1021 }
1022
1023 return ret;
1024}
1025
7ef8f65d
NA
1026/* Queue a packet for resolution. It gets locked cache entry! */
1027static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
1028 struct sk_buff *skb)
1da177e4 1029{
862465f2 1030 bool found = false;
1da177e4
LT
1031 int err;
1032 struct mfc_cache *c;
eddc9ec5 1033 const struct iphdr *iph = ip_hdr(skb);
1da177e4
LT
1034
1035 spin_lock_bh(&mfc_unres_lock);
0c12295a 1036 list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
e258beb2 1037 if (c->mfc_mcastgrp == iph->daddr &&
862465f2
PM
1038 c->mfc_origin == iph->saddr) {
1039 found = true;
1da177e4 1040 break;
862465f2 1041 }
1da177e4
LT
1042 }
1043
862465f2 1044 if (!found) {
a8cb16dd 1045 /* Create a new entry if allowable */
0c12295a 1046 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
d658f8a0 1047 (c = ipmr_cache_alloc_unres()) == NULL) {
1da177e4
LT
1048 spin_unlock_bh(&mfc_unres_lock);
1049
1050 kfree_skb(skb);
1051 return -ENOBUFS;
1052 }
1053
a8cb16dd 1054 /* Fill in the new cache entry */
eddc9ec5
ACM
1055 c->mfc_parent = -1;
1056 c->mfc_origin = iph->saddr;
1057 c->mfc_mcastgrp = iph->daddr;
1da177e4 1058
a8cb16dd 1059 /* Reflect first query at mrouted. */
0c12295a 1060 err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
4feb88e5 1061 if (err < 0) {
e905a9ed 1062 /* If the report failed throw the cache entry
1da177e4
LT
1063 out - Brad Parker
1064 */
1065 spin_unlock_bh(&mfc_unres_lock);
1066
5c0a66f5 1067 ipmr_cache_free(c);
1da177e4
LT
1068 kfree_skb(skb);
1069 return err;
1070 }
1071
0c12295a
PM
1072 atomic_inc(&mrt->cache_resolve_queue_len);
1073 list_add(&c->list, &mrt->mfc_unres_queue);
8cd3ac9f 1074 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1da177e4 1075
278554bd
DM
1076 if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
1077 mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
1da177e4
LT
1078 }
1079
a8cb16dd 1080 /* See if we can append the packet */
a8cb16dd 1081 if (c->mfc_un.unres.unresolved.qlen > 3) {
1da177e4
LT
1082 kfree_skb(skb);
1083 err = -ENOBUFS;
1084 } else {
c354e124 1085 skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1da177e4
LT
1086 err = 0;
1087 }
1088
1089 spin_unlock_bh(&mfc_unres_lock);
1090 return err;
1091}
1092
7ef8f65d 1093/* MFC cache manipulation by user space mroute daemon */
1da177e4 1094
660b26dc 1095static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
1da177e4
LT
1096{
1097 int line;
862465f2 1098 struct mfc_cache *c, *next;
1da177e4 1099
c354e124 1100 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
1da177e4 1101
0c12295a 1102 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
1da177e4 1103 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
660b26dc
ND
1104 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
1105 (parent == -1 || parent == c->mfc_parent)) {
a8c9486b 1106 list_del_rcu(&c->list);
8cd3ac9f 1107 mroute_netlink_event(mrt, c, RTM_DELROUTE);
5c0a66f5 1108 ipmr_cache_free(c);
1da177e4
LT
1109 return 0;
1110 }
1111 }
1112 return -ENOENT;
1113}
1114
0c12295a 1115static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
660b26dc 1116 struct mfcctl *mfc, int mrtsock, int parent)
1da177e4 1117{
862465f2 1118 bool found = false;
1da177e4 1119 int line;
862465f2 1120 struct mfc_cache *uc, *c;
1da177e4 1121
a50436f2
PM
1122 if (mfc->mfcc_parent >= MAXVIFS)
1123 return -ENFILE;
1124
c354e124 1125 line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
1da177e4 1126
0c12295a 1127 list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
1da177e4 1128 if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
660b26dc
ND
1129 c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
1130 (parent == -1 || parent == c->mfc_parent)) {
862465f2 1131 found = true;
1da177e4 1132 break;
862465f2 1133 }
1da177e4
LT
1134 }
1135
862465f2 1136 if (found) {
1da177e4
LT
1137 write_lock_bh(&mrt_lock);
1138 c->mfc_parent = mfc->mfcc_parent;
0c12295a 1139 ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
1da177e4
LT
1140 if (!mrtsock)
1141 c->mfc_flags |= MFC_STATIC;
1142 write_unlock_bh(&mrt_lock);
8cd3ac9f 1143 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1da177e4
LT
1144 return 0;
1145 }
1146
360eb5da 1147 if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
660b26dc 1148 !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
1da177e4
LT
1149 return -EINVAL;
1150
d658f8a0 1151 c = ipmr_cache_alloc();
51456b29 1152 if (!c)
1da177e4
LT
1153 return -ENOMEM;
1154
c354e124
JK
1155 c->mfc_origin = mfc->mfcc_origin.s_addr;
1156 c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
1157 c->mfc_parent = mfc->mfcc_parent;
0c12295a 1158 ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
1da177e4
LT
1159 if (!mrtsock)
1160 c->mfc_flags |= MFC_STATIC;
1161
a8c9486b 1162 list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);
1da177e4 1163
7ef8f65d
NA
1164 /* Check to see if we resolved a queued list. If so we
1165 * need to send on the frames and tidy up.
1da177e4 1166 */
b0ebb739 1167 found = false;
1da177e4 1168 spin_lock_bh(&mfc_unres_lock);
0c12295a 1169 list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
e258beb2 1170 if (uc->mfc_origin == c->mfc_origin &&
1da177e4 1171 uc->mfc_mcastgrp == c->mfc_mcastgrp) {
862465f2 1172 list_del(&uc->list);
0c12295a 1173 atomic_dec(&mrt->cache_resolve_queue_len);
b0ebb739 1174 found = true;
1da177e4
LT
1175 break;
1176 }
1177 }
0c12295a
PM
1178 if (list_empty(&mrt->mfc_unres_queue))
1179 del_timer(&mrt->ipmr_expire_timer);
1da177e4
LT
1180 spin_unlock_bh(&mfc_unres_lock);
1181
b0ebb739 1182 if (found) {
0c12295a 1183 ipmr_cache_resolve(net, mrt, uc, c);
5c0a66f5 1184 ipmr_cache_free(uc);
1da177e4 1185 }
8cd3ac9f 1186 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1da177e4
LT
1187 return 0;
1188}
1189
7ef8f65d 1190/* Close the multicast socket, and clear the vif tables etc */
0c12295a 1191static void mroute_clean_tables(struct mr_table *mrt)
1da177e4
LT
1192{
1193 int i;
d17fa6fa 1194 LIST_HEAD(list);
862465f2 1195 struct mfc_cache *c, *next;
e905a9ed 1196
a8cb16dd 1197 /* Shut down all active vif entries */
0c12295a 1198 for (i = 0; i < mrt->maxvif; i++) {
a8cb16dd 1199 if (!(mrt->vif_table[i].flags & VIFF_STATIC))
0c12295a 1200 vif_delete(mrt, i, 0, &list);
1da177e4 1201 }
d17fa6fa 1202 unregister_netdevice_many(&list);
1da177e4 1203
a8cb16dd 1204 /* Wipe the cache */
862465f2 1205 for (i = 0; i < MFC_LINES; i++) {
0c12295a 1206 list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
a8c9486b 1207 if (c->mfc_flags & MFC_STATIC)
1da177e4 1208 continue;
a8c9486b 1209 list_del_rcu(&c->list);
8cd3ac9f 1210 mroute_netlink_event(mrt, c, RTM_DELROUTE);
5c0a66f5 1211 ipmr_cache_free(c);
1da177e4
LT
1212 }
1213 }
1214
0c12295a 1215 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1da177e4 1216 spin_lock_bh(&mfc_unres_lock);
0c12295a 1217 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
862465f2 1218 list_del(&c->list);
8cd3ac9f 1219 mroute_netlink_event(mrt, c, RTM_DELROUTE);
0c12295a 1220 ipmr_destroy_unres(mrt, c);
1da177e4
LT
1221 }
1222 spin_unlock_bh(&mfc_unres_lock);
1223 }
1224}
1225
4c968709
ED
1226/* called from ip_ra_control(), before an RCU grace period,
1227 * we dont need to call synchronize_rcu() here
1228 */
1da177e4
LT
1229static void mrtsock_destruct(struct sock *sk)
1230{
4feb88e5 1231 struct net *net = sock_net(sk);
f0ad0860 1232 struct mr_table *mrt;
4feb88e5 1233
1da177e4 1234 rtnl_lock();
f0ad0860 1235 ipmr_for_each_table(mrt, net) {
4c968709 1236 if (sk == rtnl_dereference(mrt->mroute_sk)) {
f0ad0860 1237 IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
d67b8c61
ND
1238 inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
1239 NETCONFA_IFINDEX_ALL,
1240 net->ipv4.devconf_all);
a9b3cd7f 1241 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
f0ad0860
PM
1242 mroute_clean_tables(mrt);
1243 }
1da177e4
LT
1244 }
1245 rtnl_unlock();
1246}
1247
7ef8f65d
NA
1248/* Socket options and virtual interface manipulation. The whole
1249 * virtual interface system is a complete heap, but unfortunately
1250 * that's how BSD mrouted happens to think. Maybe one day with a proper
1251 * MOSPF/PIM router set up we can clean this up.
1da177e4 1252 */
e905a9ed 1253
29e97d21
NA
1254int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
1255 unsigned int optlen)
1da177e4 1256{
4feb88e5 1257 struct net *net = sock_net(sk);
29e97d21 1258 int val, ret = 0, parent = 0;
f0ad0860 1259 struct mr_table *mrt;
29e97d21
NA
1260 struct vifctl vif;
1261 struct mfcctl mfc;
1262 u32 uval;
f0ad0860 1263
29e97d21
NA
1264 /* There's one exception to the lock - MRT_DONE which needs to unlock */
1265 rtnl_lock();
5e1859fb 1266 if (sk->sk_type != SOCK_RAW ||
29e97d21
NA
1267 inet_sk(sk)->inet_num != IPPROTO_IGMP) {
1268 ret = -EOPNOTSUPP;
1269 goto out_unlock;
1270 }
5e1859fb 1271
f0ad0860 1272 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
29e97d21
NA
1273 if (!mrt) {
1274 ret = -ENOENT;
1275 goto out_unlock;
1276 }
132adf54 1277 if (optname != MRT_INIT) {
33d480ce 1278 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
29e97d21
NA
1279 !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
1280 ret = -EACCES;
1281 goto out_unlock;
1282 }
1da177e4
LT
1283 }
1284
132adf54
SH
1285 switch (optname) {
1286 case MRT_INIT:
42e6b89c 1287 if (optlen != sizeof(int)) {
29e97d21 1288 ret = -EINVAL;
42e6b89c
NA
1289 break;
1290 }
1291 if (rtnl_dereference(mrt->mroute_sk)) {
29e97d21 1292 ret = -EADDRINUSE;
29e97d21 1293 break;
42e6b89c 1294 }
132adf54
SH
1295
1296 ret = ip_ra_control(sk, 1, mrtsock_destruct);
1297 if (ret == 0) {
cf778b00 1298 rcu_assign_pointer(mrt->mroute_sk, sk);
4feb88e5 1299 IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
d67b8c61
ND
1300 inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
1301 NETCONFA_IFINDEX_ALL,
1302 net->ipv4.devconf_all);
132adf54 1303 }
29e97d21 1304 break;
132adf54 1305 case MRT_DONE:
29e97d21
NA
1306 if (sk != rcu_access_pointer(mrt->mroute_sk)) {
1307 ret = -EACCES;
1308 } else {
1309 /* We need to unlock here because mrtsock_destruct takes
1310 * care of rtnl itself and we can't change that due to
1311 * the IP_ROUTER_ALERT setsockopt which runs without it.
1312 */
1313 rtnl_unlock();
1314 ret = ip_ra_control(sk, 0, NULL);
1315 goto out;
1316 }
1317 break;
132adf54
SH
1318 case MRT_ADD_VIF:
1319 case MRT_DEL_VIF:
29e97d21
NA
1320 if (optlen != sizeof(vif)) {
1321 ret = -EINVAL;
1322 break;
1323 }
1324 if (copy_from_user(&vif, optval, sizeof(vif))) {
1325 ret = -EFAULT;
1326 break;
1327 }
1328 if (vif.vifc_vifi >= MAXVIFS) {
1329 ret = -ENFILE;
1330 break;
1331 }
c354e124 1332 if (optname == MRT_ADD_VIF) {
4c968709
ED
1333 ret = vif_add(net, mrt, &vif,
1334 sk == rtnl_dereference(mrt->mroute_sk));
132adf54 1335 } else {
0c12295a 1336 ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
132adf54 1337 }
29e97d21 1338 break;
7ef8f65d
NA
1339 /* Manipulate the forwarding caches. These live
1340 * in a sort of kernel/user symbiosis.
1341 */
132adf54
SH
1342 case MRT_ADD_MFC:
1343 case MRT_DEL_MFC:
660b26dc
ND
1344 parent = -1;
1345 case MRT_ADD_MFC_PROXY:
1346 case MRT_DEL_MFC_PROXY:
29e97d21
NA
1347 if (optlen != sizeof(mfc)) {
1348 ret = -EINVAL;
1349 break;
1350 }
1351 if (copy_from_user(&mfc, optval, sizeof(mfc))) {
1352 ret = -EFAULT;
1353 break;
1354 }
660b26dc
ND
1355 if (parent == 0)
1356 parent = mfc.mfcc_parent;
660b26dc
ND
1357 if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
1358 ret = ipmr_mfc_delete(mrt, &mfc, parent);
132adf54 1359 else
4c968709 1360 ret = ipmr_mfc_add(net, mrt, &mfc,
660b26dc
ND
1361 sk == rtnl_dereference(mrt->mroute_sk),
1362 parent);
29e97d21 1363 break;
7ef8f65d 1364 /* Control PIM assert. */
132adf54 1365 case MRT_ASSERT:
29e97d21
NA
1366 if (optlen != sizeof(val)) {
1367 ret = -EINVAL;
1368 break;
1369 }
1370 if (get_user(val, (int __user *)optval)) {
1371 ret = -EFAULT;
1372 break;
1373 }
1374 mrt->mroute_do_assert = val;
1375 break;
132adf54 1376 case MRT_PIM:
1973a4ea 1377 if (!ipmr_pimsm_enabled()) {
29e97d21
NA
1378 ret = -ENOPROTOOPT;
1379 break;
1380 }
1381 if (optlen != sizeof(val)) {
1382 ret = -EINVAL;
1383 break;
1384 }
1385 if (get_user(val, (int __user *)optval)) {
1386 ret = -EFAULT;
1387 break;
1388 }
ba93ef74 1389
29e97d21
NA
1390 val = !!val;
1391 if (val != mrt->mroute_do_pim) {
1392 mrt->mroute_do_pim = val;
1393 mrt->mroute_do_assert = val;
1da177e4 1394 }
29e97d21 1395 break;
f0ad0860 1396 case MRT_TABLE:
29e97d21
NA
1397 if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
1398 ret = -ENOPROTOOPT;
1399 break;
1400 }
1401 if (optlen != sizeof(uval)) {
1402 ret = -EINVAL;
1403 break;
1404 }
1405 if (get_user(uval, (u32 __user *)optval)) {
1406 ret = -EFAULT;
1407 break;
1408 }
f0ad0860 1409
4c968709
ED
1410 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1411 ret = -EBUSY;
1412 } else {
29e97d21 1413 mrt = ipmr_new_table(net, uval);
1113ebbc
NA
1414 if (IS_ERR(mrt))
1415 ret = PTR_ERR(mrt);
5e1859fb 1416 else
29e97d21 1417 raw_sk(sk)->ipmr_table = uval;
4c968709 1418 }
29e97d21 1419 break;
7ef8f65d 1420 /* Spurious command, or MRT_VERSION which you cannot set. */
132adf54 1421 default:
29e97d21 1422 ret = -ENOPROTOOPT;
1da177e4 1423 }
29e97d21
NA
1424out_unlock:
1425 rtnl_unlock();
1426out:
1427 return ret;
1da177e4
LT
1428}
1429
7ef8f65d 1430/* Getsock opt support for the multicast routing system. */
c354e124 1431int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
1da177e4
LT
1432{
1433 int olr;
1434 int val;
4feb88e5 1435 struct net *net = sock_net(sk);
f0ad0860
PM
1436 struct mr_table *mrt;
1437
5e1859fb
ED
1438 if (sk->sk_type != SOCK_RAW ||
1439 inet_sk(sk)->inet_num != IPPROTO_IGMP)
1440 return -EOPNOTSUPP;
1441
f0ad0860 1442 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
51456b29 1443 if (!mrt)
f0ad0860 1444 return -ENOENT;
1da177e4 1445
fe9ef3ce
NA
1446 switch (optname) {
1447 case MRT_VERSION:
1448 val = 0x0305;
1449 break;
1450 case MRT_PIM:
1973a4ea 1451 if (!ipmr_pimsm_enabled())
fe9ef3ce
NA
1452 return -ENOPROTOOPT;
1453 val = mrt->mroute_do_pim;
1454 break;
1455 case MRT_ASSERT:
1456 val = mrt->mroute_do_assert;
1457 break;
1458 default:
1da177e4 1459 return -ENOPROTOOPT;
fe9ef3ce 1460 }
1da177e4
LT
1461
1462 if (get_user(olr, optlen))
1463 return -EFAULT;
1da177e4
LT
1464 olr = min_t(unsigned int, olr, sizeof(int));
1465 if (olr < 0)
1466 return -EINVAL;
c354e124 1467 if (put_user(olr, optlen))
1da177e4 1468 return -EFAULT;
c354e124 1469 if (copy_to_user(optval, &val, olr))
1da177e4
LT
1470 return -EFAULT;
1471 return 0;
1472}
1473
7ef8f65d 1474/* The IP multicast ioctl support routines. */
1da177e4
LT
1475int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1476{
1477 struct sioc_sg_req sr;
1478 struct sioc_vif_req vr;
1479 struct vif_device *vif;
1480 struct mfc_cache *c;
4feb88e5 1481 struct net *net = sock_net(sk);
f0ad0860
PM
1482 struct mr_table *mrt;
1483
1484 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
51456b29 1485 if (!mrt)
f0ad0860 1486 return -ENOENT;
e905a9ed 1487
132adf54
SH
1488 switch (cmd) {
1489 case SIOCGETVIFCNT:
c354e124 1490 if (copy_from_user(&vr, arg, sizeof(vr)))
132adf54 1491 return -EFAULT;
0c12295a 1492 if (vr.vifi >= mrt->maxvif)
132adf54
SH
1493 return -EINVAL;
1494 read_lock(&mrt_lock);
0c12295a
PM
1495 vif = &mrt->vif_table[vr.vifi];
1496 if (VIF_EXISTS(mrt, vr.vifi)) {
c354e124
JK
1497 vr.icount = vif->pkt_in;
1498 vr.ocount = vif->pkt_out;
1499 vr.ibytes = vif->bytes_in;
1500 vr.obytes = vif->bytes_out;
1da177e4 1501 read_unlock(&mrt_lock);
1da177e4 1502
c354e124 1503 if (copy_to_user(arg, &vr, sizeof(vr)))
132adf54
SH
1504 return -EFAULT;
1505 return 0;
1506 }
1507 read_unlock(&mrt_lock);
1508 return -EADDRNOTAVAIL;
1509 case SIOCGETSGCNT:
c354e124 1510 if (copy_from_user(&sr, arg, sizeof(sr)))
132adf54
SH
1511 return -EFAULT;
1512
a8c9486b 1513 rcu_read_lock();
0c12295a 1514 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
132adf54
SH
1515 if (c) {
1516 sr.pktcnt = c->mfc_un.res.pkt;
1517 sr.bytecnt = c->mfc_un.res.bytes;
1518 sr.wrong_if = c->mfc_un.res.wrong_if;
a8c9486b 1519 rcu_read_unlock();
132adf54 1520
c354e124 1521 if (copy_to_user(arg, &sr, sizeof(sr)))
132adf54
SH
1522 return -EFAULT;
1523 return 0;
1524 }
a8c9486b 1525 rcu_read_unlock();
132adf54
SH
1526 return -EADDRNOTAVAIL;
1527 default:
1528 return -ENOIOCTLCMD;
1da177e4
LT
1529 }
1530}
1531
709b46e8
EB
1532#ifdef CONFIG_COMPAT
1533struct compat_sioc_sg_req {
1534 struct in_addr src;
1535 struct in_addr grp;
1536 compat_ulong_t pktcnt;
1537 compat_ulong_t bytecnt;
1538 compat_ulong_t wrong_if;
1539};
1540
ca6b8bb0
DM
1541struct compat_sioc_vif_req {
1542 vifi_t vifi; /* Which iface */
1543 compat_ulong_t icount;
1544 compat_ulong_t ocount;
1545 compat_ulong_t ibytes;
1546 compat_ulong_t obytes;
1547};
1548
709b46e8
EB
1549int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1550{
0033d5ad 1551 struct compat_sioc_sg_req sr;
ca6b8bb0
DM
1552 struct compat_sioc_vif_req vr;
1553 struct vif_device *vif;
709b46e8
EB
1554 struct mfc_cache *c;
1555 struct net *net = sock_net(sk);
1556 struct mr_table *mrt;
1557
1558 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
51456b29 1559 if (!mrt)
709b46e8
EB
1560 return -ENOENT;
1561
1562 switch (cmd) {
ca6b8bb0
DM
1563 case SIOCGETVIFCNT:
1564 if (copy_from_user(&vr, arg, sizeof(vr)))
1565 return -EFAULT;
1566 if (vr.vifi >= mrt->maxvif)
1567 return -EINVAL;
1568 read_lock(&mrt_lock);
1569 vif = &mrt->vif_table[vr.vifi];
1570 if (VIF_EXISTS(mrt, vr.vifi)) {
1571 vr.icount = vif->pkt_in;
1572 vr.ocount = vif->pkt_out;
1573 vr.ibytes = vif->bytes_in;
1574 vr.obytes = vif->bytes_out;
1575 read_unlock(&mrt_lock);
1576
1577 if (copy_to_user(arg, &vr, sizeof(vr)))
1578 return -EFAULT;
1579 return 0;
1580 }
1581 read_unlock(&mrt_lock);
1582 return -EADDRNOTAVAIL;
709b46e8
EB
1583 case SIOCGETSGCNT:
1584 if (copy_from_user(&sr, arg, sizeof(sr)))
1585 return -EFAULT;
1586
1587 rcu_read_lock();
1588 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
1589 if (c) {
1590 sr.pktcnt = c->mfc_un.res.pkt;
1591 sr.bytecnt = c->mfc_un.res.bytes;
1592 sr.wrong_if = c->mfc_un.res.wrong_if;
1593 rcu_read_unlock();
1594
1595 if (copy_to_user(arg, &sr, sizeof(sr)))
1596 return -EFAULT;
1597 return 0;
1598 }
1599 rcu_read_unlock();
1600 return -EADDRNOTAVAIL;
1601 default:
1602 return -ENOIOCTLCMD;
1603 }
1604}
1605#endif
1606
1da177e4
LT
1607static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
1608{
351638e7 1609 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4feb88e5 1610 struct net *net = dev_net(dev);
f0ad0860 1611 struct mr_table *mrt;
1da177e4
LT
1612 struct vif_device *v;
1613 int ct;
e9dc8653 1614
1da177e4
LT
1615 if (event != NETDEV_UNREGISTER)
1616 return NOTIFY_DONE;
f0ad0860
PM
1617
1618 ipmr_for_each_table(mrt, net) {
1619 v = &mrt->vif_table[0];
1620 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1621 if (v->dev == dev)
e92036a6 1622 vif_delete(mrt, ct, 1, NULL);
f0ad0860 1623 }
1da177e4
LT
1624 }
1625 return NOTIFY_DONE;
1626}
1627
c354e124 1628static struct notifier_block ip_mr_notifier = {
1da177e4
LT
1629 .notifier_call = ipmr_device_event,
1630};
1631
7ef8f65d
NA
1632/* Encapsulate a packet by attaching a valid IPIP header to it.
1633 * This avoids tunnel drivers and other mess and gives us the speed so
1634 * important for multicast video.
1da177e4 1635 */
b6a7719a
HFS
1636static void ip_encap(struct net *net, struct sk_buff *skb,
1637 __be32 saddr, __be32 daddr)
1da177e4 1638{
8856dfa3 1639 struct iphdr *iph;
b71d1d42 1640 const struct iphdr *old_iph = ip_hdr(skb);
8856dfa3
ACM
1641
1642 skb_push(skb, sizeof(struct iphdr));
b0e380b1 1643 skb->transport_header = skb->network_header;
8856dfa3 1644 skb_reset_network_header(skb);
eddc9ec5 1645 iph = ip_hdr(skb);
1da177e4 1646
a8cb16dd 1647 iph->version = 4;
e023dd64
ACM
1648 iph->tos = old_iph->tos;
1649 iph->ttl = old_iph->ttl;
1da177e4
LT
1650 iph->frag_off = 0;
1651 iph->daddr = daddr;
1652 iph->saddr = saddr;
1653 iph->protocol = IPPROTO_IPIP;
1654 iph->ihl = 5;
1655 iph->tot_len = htons(skb->len);
b6a7719a 1656 ip_select_ident(net, skb, NULL);
1da177e4
LT
1657 ip_send_check(iph);
1658
1da177e4
LT
1659 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1660 nf_reset(skb);
1661}
1662
0c4b51f0
EB
1663static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
1664 struct sk_buff *skb)
1da177e4 1665{
a8cb16dd 1666 struct ip_options *opt = &(IPCB(skb)->opt);
1da177e4 1667
73186df8
DM
1668 IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
1669 IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
1da177e4
LT
1670
1671 if (unlikely(opt->optlen))
1672 ip_forward_options(skb);
1673
13206b6b 1674 return dst_output(net, sk, skb);
1da177e4
LT
1675}
1676
7ef8f65d 1677/* Processing handlers for ipmr_forward */
1da177e4 1678
0c12295a
PM
1679static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1680 struct sk_buff *skb, struct mfc_cache *c, int vifi)
1da177e4 1681{
eddc9ec5 1682 const struct iphdr *iph = ip_hdr(skb);
0c12295a 1683 struct vif_device *vif = &mrt->vif_table[vifi];
1da177e4
LT
1684 struct net_device *dev;
1685 struct rtable *rt;
31e4543d 1686 struct flowi4 fl4;
1da177e4
LT
1687 int encap = 0;
1688
51456b29 1689 if (!vif->dev)
1da177e4
LT
1690 goto out_free;
1691
1da177e4
LT
1692 if (vif->flags & VIFF_REGISTER) {
1693 vif->pkt_out++;
c354e124 1694 vif->bytes_out += skb->len;
cf3677ae
PE
1695 vif->dev->stats.tx_bytes += skb->len;
1696 vif->dev->stats.tx_packets++;
0c12295a 1697 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
69ebbf58 1698 goto out_free;
1da177e4 1699 }
1da177e4 1700
a8cb16dd 1701 if (vif->flags & VIFF_TUNNEL) {
31e4543d 1702 rt = ip_route_output_ports(net, &fl4, NULL,
78fbfd8a
DM
1703 vif->remote, vif->local,
1704 0, 0,
1705 IPPROTO_IPIP,
1706 RT_TOS(iph->tos), vif->link);
b23dd4fe 1707 if (IS_ERR(rt))
1da177e4
LT
1708 goto out_free;
1709 encap = sizeof(struct iphdr);
1710 } else {
31e4543d 1711 rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
78fbfd8a
DM
1712 0, 0,
1713 IPPROTO_IPIP,
1714 RT_TOS(iph->tos), vif->link);
b23dd4fe 1715 if (IS_ERR(rt))
1da177e4
LT
1716 goto out_free;
1717 }
1718
d8d1f30b 1719 dev = rt->dst.dev;
1da177e4 1720
d8d1f30b 1721 if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
1da177e4 1722 /* Do not fragment multicasts. Alas, IPv4 does not
a8cb16dd
ED
1723 * allow to send ICMP, so that packets will disappear
1724 * to blackhole.
1da177e4 1725 */
73186df8 1726 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
1da177e4
LT
1727 ip_rt_put(rt);
1728 goto out_free;
1729 }
1730
d8d1f30b 1731 encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
1da177e4
LT
1732
1733 if (skb_cow(skb, encap)) {
e905a9ed 1734 ip_rt_put(rt);
1da177e4
LT
1735 goto out_free;
1736 }
1737
1738 vif->pkt_out++;
c354e124 1739 vif->bytes_out += skb->len;
1da177e4 1740
adf30907 1741 skb_dst_drop(skb);
d8d1f30b 1742 skb_dst_set(skb, &rt->dst);
eddc9ec5 1743 ip_decrease_ttl(ip_hdr(skb));
1da177e4
LT
1744
1745 /* FIXME: forward and output firewalls used to be called here.
a8cb16dd
ED
1746 * What do we do with netfilter? -- RR
1747 */
1da177e4 1748 if (vif->flags & VIFF_TUNNEL) {
b6a7719a 1749 ip_encap(net, skb, vif->local, vif->remote);
1da177e4 1750 /* FIXME: extra output firewall step used to be here. --RR */
2f4c02d4
PE
1751 vif->dev->stats.tx_packets++;
1752 vif->dev->stats.tx_bytes += skb->len;
1da177e4
LT
1753 }
1754
1755 IPCB(skb)->flags |= IPSKB_FORWARDED;
1756
7ef8f65d 1757 /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
1da177e4
LT
1758 * not only before forwarding, but after forwarding on all output
1759 * interfaces. It is clear, if mrouter runs a multicasting
1760 * program, it should receive packets not depending to what interface
1761 * program is joined.
1762 * If we will not make it, the program will have to join on all
1763 * interfaces. On the other hand, multihoming host (or router, but
1764 * not mrouter) cannot join to more than one interface - it will
1765 * result in receiving multiple packets.
1766 */
29a26a56
EB
1767 NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
1768 net, NULL, skb, skb->dev, dev,
1da177e4
LT
1769 ipmr_forward_finish);
1770 return;
1771
1772out_free:
1773 kfree_skb(skb);
1da177e4
LT
1774}
1775
0c12295a 1776static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
1da177e4
LT
1777{
1778 int ct;
0c12295a
PM
1779
1780 for (ct = mrt->maxvif-1; ct >= 0; ct--) {
1781 if (mrt->vif_table[ct].dev == dev)
1da177e4
LT
1782 break;
1783 }
1784 return ct;
1785}
1786
1787/* "local" means that we should preserve one skb (for local delivery) */
c4854ec8
RR
1788static void ip_mr_forward(struct net *net, struct mr_table *mrt,
1789 struct sk_buff *skb, struct mfc_cache *cache,
1790 int local)
1da177e4
LT
1791{
1792 int psend = -1;
1793 int vif, ct;
660b26dc 1794 int true_vifi = ipmr_find_vif(mrt, skb->dev);
1da177e4
LT
1795
1796 vif = cache->mfc_parent;
1797 cache->mfc_un.res.pkt++;
1798 cache->mfc_un.res.bytes += skb->len;
1799
360eb5da 1800 if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
660b26dc
ND
1801 struct mfc_cache *cache_proxy;
1802
1803 /* For an (*,G) entry, we only check that the incomming
1804 * interface is part of the static tree.
1805 */
1806 cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
1807 if (cache_proxy &&
1808 cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
1809 goto forward;
1810 }
1811
7ef8f65d 1812 /* Wrong interface: drop packet and (maybe) send PIM assert. */
0c12295a 1813 if (mrt->vif_table[vif].dev != skb->dev) {
c7537967 1814 if (rt_is_output_route(skb_rtable(skb))) {
1da177e4 1815 /* It is our own packet, looped back.
a8cb16dd
ED
1816 * Very complicated situation...
1817 *
1818 * The best workaround until the routing daemons are
1819 * fixed is not to redistribute a packet if it was
1820 * sent through the wrong interface. It means that
1821 * multicast applications WILL NOT work for
1822 * (S,G) entries whose default multicast route points
1823 * to the wrong oif. In any case, it is not a good
1824 * idea to use multicasting applications on a router.
1da177e4
LT
1825 */
1826 goto dont_forward;
1827 }
1828
1829 cache->mfc_un.res.wrong_if++;
1da177e4 1830
0c12295a 1831 if (true_vifi >= 0 && mrt->mroute_do_assert &&
1da177e4 1832 /* pimsm uses asserts when switching from RPT to SPT,
a8cb16dd
ED
 1833 * so we cannot check that the packet arrived on an oif.
 1834 * It is bad, but otherwise we would need to move a pretty
 1835 * large chunk of pimd into the kernel. Ough... --ANK
1da177e4 1836 */
0c12295a 1837 (mrt->mroute_do_pim ||
6f9374a9 1838 cache->mfc_un.res.ttls[true_vifi] < 255) &&
e905a9ed 1839 time_after(jiffies,
1da177e4
LT
1840 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
1841 cache->mfc_un.res.last_assert = jiffies;
0c12295a 1842 ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
1da177e4
LT
1843 }
1844 goto dont_forward;
1845 }
1846
660b26dc 1847forward:
0c12295a
PM
1848 mrt->vif_table[vif].pkt_in++;
1849 mrt->vif_table[vif].bytes_in += skb->len;
1da177e4 1850
7ef8f65d 1851 /* Forward the frame */
360eb5da
ND
1852 if (cache->mfc_origin == htonl(INADDR_ANY) &&
1853 cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
660b26dc
ND
1854 if (true_vifi >= 0 &&
1855 true_vifi != cache->mfc_parent &&
1856 ip_hdr(skb)->ttl >
1857 cache->mfc_un.res.ttls[cache->mfc_parent]) {
1858 /* It's an (*,*) entry and the packet is not coming from
1859 * the upstream: forward the packet to the upstream
1860 * only.
1861 */
1862 psend = cache->mfc_parent;
1863 goto last_forward;
1864 }
1865 goto dont_forward;
1866 }
a8cb16dd
ED
1867 for (ct = cache->mfc_un.res.maxvif - 1;
1868 ct >= cache->mfc_un.res.minvif; ct--) {
660b26dc 1869 /* For (*,G) entry, don't forward to the incoming interface */
360eb5da
ND
1870 if ((cache->mfc_origin != htonl(INADDR_ANY) ||
1871 ct != true_vifi) &&
660b26dc 1872 ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
1da177e4
LT
1873 if (psend != -1) {
1874 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
a8cb16dd 1875
1da177e4 1876 if (skb2)
0c12295a
PM
1877 ipmr_queue_xmit(net, mrt, skb2, cache,
1878 psend);
1da177e4 1879 }
c354e124 1880 psend = ct;
1da177e4
LT
1881 }
1882 }
660b26dc 1883last_forward:
1da177e4
LT
1884 if (psend != -1) {
1885 if (local) {
1886 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
a8cb16dd 1887
1da177e4 1888 if (skb2)
0c12295a 1889 ipmr_queue_xmit(net, mrt, skb2, cache, psend);
1da177e4 1890 } else {
0c12295a 1891 ipmr_queue_xmit(net, mrt, skb, cache, psend);
c4854ec8 1892 return;
1da177e4
LT
1893 }
1894 }
1895
1896dont_forward:
1897 if (!local)
1898 kfree_skb(skb);
1da177e4
LT
1899}
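
The per-vif TTL array consulted above acts as a scoping threshold: a packet is replicated onto vif ct only when its IP TTL is strictly greater than ttls[ct], and a threshold of 255 effectively disables the vif. A minimal standalone sketch of that gate, using a hypothetical toy table rather than the kernel structures:

#include <stdio.h>

#define TOY_MAXVIF 4

/* ttls[ct] == 255 means "never forward on this vif", as in the kernel table. */
static const unsigned char ttls[TOY_MAXVIF] = { 1, 5, 255, 64 };

static int should_forward(int ct, unsigned char pkt_ttl)
{
	return pkt_ttl > ttls[ct];
}

int main(void)
{
	unsigned char pkt_ttl = 16;
	int ct;

	/* Walk the table from the highest vif down, like ip_mr_forward(). */
	for (ct = TOY_MAXVIF - 1; ct >= 0; ct--)
		printf("vif %d: %s\n", ct,
		       should_forward(ct, pkt_ttl) ? "forward" : "skip");
	return 0;
}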
1900
417da66f 1901static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
ee3f1aaf 1902{
417da66f
DM
1903 struct rtable *rt = skb_rtable(skb);
1904 struct iphdr *iph = ip_hdr(skb);
da91981b 1905 struct flowi4 fl4 = {
417da66f
DM
1906 .daddr = iph->daddr,
1907 .saddr = iph->saddr,
b0fe4a31 1908 .flowi4_tos = RT_TOS(iph->tos),
4fd551d7
DM
1909 .flowi4_oif = (rt_is_output_route(rt) ?
1910 skb->dev->ifindex : 0),
1911 .flowi4_iif = (rt_is_output_route(rt) ?
1fb9489b 1912 LOOPBACK_IFINDEX :
4fd551d7 1913 skb->dev->ifindex),
b4869889 1914 .flowi4_mark = skb->mark,
ee3f1aaf
DM
1915 };
1916 struct mr_table *mrt;
1917 int err;
1918
da91981b 1919 err = ipmr_fib_lookup(net, &fl4, &mrt);
ee3f1aaf
DM
1920 if (err)
1921 return ERR_PTR(err);
1922 return mrt;
1923}
1da177e4 1924
7ef8f65d
NA
1925/* Multicast packets for forwarding arrive here
1926 * Called with rcu_read_lock();
1da177e4 1927 */
1da177e4
LT
1928int ip_mr_input(struct sk_buff *skb)
1929{
1930 struct mfc_cache *cache;
4feb88e5 1931 struct net *net = dev_net(skb->dev);
511c3f92 1932 int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
f0ad0860 1933 struct mr_table *mrt;
1da177e4
LT
1934
1935 /* Packet is looped back after forwarding; it should not be
a8cb16dd 1936 * forwarded a second time, but it can still be delivered locally.
1da177e4 1937 */
4c968709 1938 if (IPCB(skb)->flags & IPSKB_FORWARDED)
1da177e4
LT
1939 goto dont_forward;
1940
417da66f 1941 mrt = ipmr_rt_fib_lookup(net, skb);
ee3f1aaf
DM
1942 if (IS_ERR(mrt)) {
1943 kfree_skb(skb);
1944 return PTR_ERR(mrt);
e40dbc51 1945 }
1da177e4 1946 if (!local) {
4c968709
ED
1947 if (IPCB(skb)->opt.router_alert) {
1948 if (ip_call_ra_chain(skb))
1949 return 0;
1950 } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
1951 /* IGMPv1 (and broken IGMPv2 implementations such as
1952 * Cisco IOS <= 11.2(8)) do not put the router alert
1953 * option in IGMP packets destined for routable
1954 * groups. This is very bad, because it means
1955 * that we can forward NO IGMP messages.
1956 */
1957 struct sock *mroute_sk;
1958
1959 mroute_sk = rcu_dereference(mrt->mroute_sk);
1960 if (mroute_sk) {
1961 nf_reset(skb);
1962 raw_rcv(mroute_sk, skb);
1963 return 0;
1964 }
1da177e4
LT
1965 }
1966 }
1967
a8c9486b 1968 /* already under rcu_read_lock() */
0c12295a 1969 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
51456b29 1970 if (!cache) {
660b26dc
ND
1971 int vif = ipmr_find_vif(mrt, skb->dev);
1972
1973 if (vif >= 0)
1974 cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
1975 vif);
1976 }
1da177e4 1977
7ef8f65d 1978 /* No usable cache entry */
51456b29 1979 if (!cache) {
1da177e4
LT
1980 int vif;
1981
1982 if (local) {
1983 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1984 ip_local_deliver(skb);
51456b29 1985 if (!skb2)
1da177e4 1986 return -ENOBUFS;
1da177e4
LT
1987 skb = skb2;
1988 }
1989
a8c9486b 1990 read_lock(&mrt_lock);
0c12295a 1991 vif = ipmr_find_vif(mrt, skb->dev);
1da177e4 1992 if (vif >= 0) {
0eae88f3 1993 int err2 = ipmr_cache_unresolved(mrt, vif, skb);
1da177e4
LT
1994 read_unlock(&mrt_lock);
1995
0eae88f3 1996 return err2;
1da177e4
LT
1997 }
1998 read_unlock(&mrt_lock);
1999 kfree_skb(skb);
2000 return -ENODEV;
2001 }
2002
a8c9486b 2003 read_lock(&mrt_lock);
0c12295a 2004 ip_mr_forward(net, mrt, skb, cache, local);
1da177e4
LT
2005 read_unlock(&mrt_lock);
2006
2007 if (local)
2008 return ip_local_deliver(skb);
2009
2010 return 0;
2011
2012dont_forward:
2013 if (local)
2014 return ip_local_deliver(skb);
2015 kfree_skb(skb);
2016 return 0;
2017}
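
ip_mr_input() only hands IGMP traffic to raw_rcv() once a routing daemon has claimed mrt->mroute_sk. A minimal userspace sketch of how a daemon typically claims that socket, assuming root privileges, with error paths trimmed and the MRT_ADD_VIF/MRT_ADD_MFC steps only hinted at in a comment:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute.h>	/* MRT_INIT, MRT_DONE */

int main(void)
{
	int one = 1;
	/* The mroute socket must be a raw IGMP socket. */
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Claim multicast routing for this network namespace. */
	if (setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one)) < 0) {
		perror("setsockopt(MRT_INIT)");
		return 1;
	}
	/* ... add vifs with MRT_ADD_VIF and cache entries with MRT_ADD_MFC ... */
	setsockopt(fd, IPPROTO_IP, MRT_DONE, &one, sizeof(one));
	close(fd);
	return 0;
}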
2018
b1879204 2019#ifdef CONFIG_IP_PIMSM_V1
7ef8f65d 2020/* Handle IGMP messages of PIMv1 */
a8cb16dd 2021int pim_rcv_v1(struct sk_buff *skb)
b1879204
IJ
2022{
2023 struct igmphdr *pim;
4feb88e5 2024 struct net *net = dev_net(skb->dev);
f0ad0860 2025 struct mr_table *mrt;
b1879204
IJ
2026
2027 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2028 goto drop;
2029
2030 pim = igmp_hdr(skb);
2031
417da66f 2032 mrt = ipmr_rt_fib_lookup(net, skb);
ee3f1aaf
DM
2033 if (IS_ERR(mrt))
2034 goto drop;
0c12295a 2035 if (!mrt->mroute_do_pim ||
b1879204
IJ
2036 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
2037 goto drop;
2038
f0ad0860 2039 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
b1879204
IJ
2040drop:
2041 kfree_skb(skb);
2042 }
1da177e4
LT
2043 return 0;
2044}
2045#endif
2046
2047#ifdef CONFIG_IP_PIMSM_V2
a8cb16dd 2048static int pim_rcv(struct sk_buff *skb)
1da177e4
LT
2049{
2050 struct pimreghdr *pim;
f0ad0860
PM
2051 struct net *net = dev_net(skb->dev);
2052 struct mr_table *mrt;
1da177e4 2053
b1879204 2054 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
1da177e4
LT
2055 goto drop;
2056
9c70220b 2057 pim = (struct pimreghdr *)skb_transport_header(skb);
a8cb16dd
ED
2058 if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
2059 (pim->flags & PIM_NULL_REGISTER) ||
e905a9ed 2060 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
d3bc23e7 2061 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
1da177e4
LT
2062 goto drop;
2063
417da66f 2064 mrt = ipmr_rt_fib_lookup(net, skb);
ee3f1aaf
DM
2065 if (IS_ERR(mrt))
2066 goto drop;
f0ad0860 2067 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
b1879204
IJ
2068drop:
2069 kfree_skb(skb);
2070 }
1da177e4
LT
2071 return 0;
2072}
2073#endif
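
For reference, the type check in pim_rcv() accepts only a PIMv2 Register message: version 2 in the high nibble and type 1 in the low nibble of the first octet, with the Null-Register flag clear. A standalone illustration of that byte test; the TOY_* constants below are local stand-ins mirroring PIM_VERSION and PIM_REGISTER, not the kernel definitions themselves:

#include <stdio.h>

#define TOY_PIM_VERSION  2	/* mirrors PIM_VERSION */
#define TOY_PIM_REGISTER 1	/* mirrors PIM_REGISTER */

/* First octet of a PIMv2 header: version in bits 7-4, type in bits 3-0. */
static int is_pim_register(unsigned char type_octet)
{
	return type_octet == ((TOY_PIM_VERSION << 4) | TOY_PIM_REGISTER);
}

int main(void)
{
	printf("0x21 -> %d\n", is_pim_register(0x21));	/* 1: PIMv2 Register */
	printf("0x20 -> %d\n", is_pim_register(0x20));	/* 0: PIMv2 Hello */
	return 0;
}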
2074
cb6a4e46
PM
2075static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2076 struct mfc_cache *c, struct rtmsg *rtm)
1da177e4
LT
2077{
2078 int ct;
2079 struct rtnexthop *nhp;
92a395e5 2080 struct nlattr *mp_attr;
adfa85e4 2081 struct rta_mfc_stats mfcs;
1da177e4 2082
7438189b 2083 /* If cache is unresolved, don't try to parse IIF and OIF */
ed0f160a 2084 if (c->mfc_parent >= MAXVIFS)
7438189b
ND
2085 return -ENOENT;
2086
92a395e5
TG
2087 if (VIF_EXISTS(mrt, c->mfc_parent) &&
2088 nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
2089 return -EMSGSIZE;
1da177e4 2090
92a395e5
TG
2091 if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
2092 return -EMSGSIZE;
1da177e4
LT
2093
2094 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
0c12295a 2095 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
92a395e5
TG
2096 if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
2097 nla_nest_cancel(skb, mp_attr);
2098 return -EMSGSIZE;
2099 }
2100
1da177e4
LT
2101 nhp->rtnh_flags = 0;
2102 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
0c12295a 2103 nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
1da177e4
LT
2104 nhp->rtnh_len = sizeof(*nhp);
2105 }
2106 }
92a395e5
TG
2107
2108 nla_nest_end(skb, mp_attr);
2109
adfa85e4
ND
2110 mfcs.mfcs_packets = c->mfc_un.res.pkt;
2111 mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2112 mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2113 if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
2114 return -EMSGSIZE;
2115
1da177e4
LT
2116 rtm->rtm_type = RTN_MULTICAST;
2117 return 1;
1da177e4
LT
2118}
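
__ipmr_fill_mroute() packs the output interfaces as struct rtnexthop records nested inside an RTA_MULTIPATH attribute, with rtnh_hops reused to carry the vif TTL threshold. A sketch of how a userspace listener might walk such an attribute, here fed with a hand-built one-nexthop example for illustration:

#include <stdio.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>	/* struct rtattr, struct rtnexthop, RTNH_* */

/* Walk the rtnexthop records nested in an RTA_MULTIPATH attribute. */
static void dump_multipath(struct rtattr *mp)
{
	struct rtnexthop *nhp = RTA_DATA(mp);
	int len = RTA_PAYLOAD(mp);

	while (RTNH_OK(nhp, len)) {
		/* rtnh_hops carries the vif TTL threshold, as filled above. */
		printf("oif=%d ttl_threshold=%u\n",
		       nhp->rtnh_ifindex, nhp->rtnh_hops);
		len -= NLMSG_ALIGN(nhp->rtnh_len);
		nhp = RTNH_NEXT(nhp);
	}
}

int main(void)
{
	/* A fake RTA_MULTIPATH attribute holding one nexthop, for illustration. */
	struct {
		struct rtattr rta;
		struct rtnexthop nh;
	} msg = {
		.rta = { .rta_len = RTA_LENGTH(sizeof(struct rtnexthop)),
			 .rta_type = RTA_MULTIPATH },
		.nh  = { .rtnh_len = sizeof(struct rtnexthop),
			 .rtnh_hops = 1, .rtnh_ifindex = 2 },
	};

	dump_multipath(&msg.rta);
	return 0;
}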
2119
9a1b9496
DM
2120int ipmr_get_route(struct net *net, struct sk_buff *skb,
2121 __be32 saddr, __be32 daddr,
2122 struct rtmsg *rtm, int nowait)
1da177e4 2123{
1da177e4 2124 struct mfc_cache *cache;
9a1b9496
DM
2125 struct mr_table *mrt;
2126 int err;
1da177e4 2127
f0ad0860 2128 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
51456b29 2129 if (!mrt)
f0ad0860
PM
2130 return -ENOENT;
2131
a8c9486b 2132 rcu_read_lock();
9a1b9496 2133 cache = ipmr_cache_find(mrt, saddr, daddr);
51456b29 2134 if (!cache && skb->dev) {
660b26dc 2135 int vif = ipmr_find_vif(mrt, skb->dev);
1da177e4 2136
660b26dc
ND
2137 if (vif >= 0)
2138 cache = ipmr_cache_find_any(mrt, daddr, vif);
2139 }
51456b29 2140 if (!cache) {
72287490 2141 struct sk_buff *skb2;
eddc9ec5 2142 struct iphdr *iph;
1da177e4 2143 struct net_device *dev;
a8cb16dd 2144 int vif = -1;
1da177e4
LT
2145
2146 if (nowait) {
a8c9486b 2147 rcu_read_unlock();
1da177e4
LT
2148 return -EAGAIN;
2149 }
2150
2151 dev = skb->dev;
a8c9486b 2152 read_lock(&mrt_lock);
a8cb16dd
ED
2153 if (dev)
2154 vif = ipmr_find_vif(mrt, dev);
2155 if (vif < 0) {
1da177e4 2156 read_unlock(&mrt_lock);
a8c9486b 2157 rcu_read_unlock();
1da177e4
LT
2158 return -ENODEV;
2159 }
72287490
AK
2160 skb2 = skb_clone(skb, GFP_ATOMIC);
2161 if (!skb2) {
2162 read_unlock(&mrt_lock);
a8c9486b 2163 rcu_read_unlock();
72287490
AK
2164 return -ENOMEM;
2165 }
2166
e2d1bca7
ACM
2167 skb_push(skb2, sizeof(struct iphdr));
2168 skb_reset_network_header(skb2);
eddc9ec5
ACM
2169 iph = ip_hdr(skb2);
2170 iph->ihl = sizeof(struct iphdr) >> 2;
9a1b9496
DM
2171 iph->saddr = saddr;
2172 iph->daddr = daddr;
eddc9ec5 2173 iph->version = 0;
0c12295a 2174 err = ipmr_cache_unresolved(mrt, vif, skb2);
1da177e4 2175 read_unlock(&mrt_lock);
a8c9486b 2176 rcu_read_unlock();
1da177e4
LT
2177 return err;
2178 }
2179
a8c9486b 2180 read_lock(&mrt_lock);
cb6a4e46 2181 err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
1da177e4 2182 read_unlock(&mrt_lock);
a8c9486b 2183 rcu_read_unlock();
1da177e4
LT
2184 return err;
2185}
2186
cb6a4e46 2187static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
65886f43
ND
2188 u32 portid, u32 seq, struct mfc_cache *c, int cmd,
2189 int flags)
cb6a4e46
PM
2190{
2191 struct nlmsghdr *nlh;
2192 struct rtmsg *rtm;
1eb99af5 2193 int err;
cb6a4e46 2194
65886f43 2195 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
51456b29 2196 if (!nlh)
cb6a4e46
PM
2197 return -EMSGSIZE;
2198
2199 rtm = nlmsg_data(nlh);
2200 rtm->rtm_family = RTNL_FAMILY_IPMR;
2201 rtm->rtm_dst_len = 32;
2202 rtm->rtm_src_len = 32;
2203 rtm->rtm_tos = 0;
2204 rtm->rtm_table = mrt->id;
f3756b79
DM
2205 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2206 goto nla_put_failure;
cb6a4e46
PM
2207 rtm->rtm_type = RTN_MULTICAST;
2208 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
9a68ac72
ND
2209 if (c->mfc_flags & MFC_STATIC)
2210 rtm->rtm_protocol = RTPROT_STATIC;
2211 else
2212 rtm->rtm_protocol = RTPROT_MROUTED;
cb6a4e46
PM
2213 rtm->rtm_flags = 0;
2214
930345ea
JB
2215 if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
2216 nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
f3756b79 2217 goto nla_put_failure;
1eb99af5
ND
2218 err = __ipmr_fill_mroute(mrt, skb, c, rtm);
2219 /* do not break the dump if cache is unresolved */
2220 if (err < 0 && err != -ENOENT)
cb6a4e46
PM
2221 goto nla_put_failure;
2222
053c095a
JB
2223 nlmsg_end(skb, nlh);
2224 return 0;
cb6a4e46
PM
2225
2226nla_put_failure:
2227 nlmsg_cancel(skb, nlh);
2228 return -EMSGSIZE;
2229}
2230
8cd3ac9f
ND
2231static size_t mroute_msgsize(bool unresolved, int maxvif)
2232{
2233 size_t len =
2234 NLMSG_ALIGN(sizeof(struct rtmsg))
2235 + nla_total_size(4) /* RTA_TABLE */
2236 + nla_total_size(4) /* RTA_SRC */
2237 + nla_total_size(4) /* RTA_DST */
2238 ;
2239
2240 if (!unresolved)
2241 len = len
2242 + nla_total_size(4) /* RTA_IIF */
2243 + nla_total_size(0) /* RTA_MULTIPATH */
2244 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2245 /* RTA_MFC_STATS */
2246 + nla_total_size(sizeof(struct rta_mfc_stats))
2247 ;
2248
2249 return len;
2250}
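
Since nla_total_size(n) expands to NLA_ALIGN(NLA_HDRLEN + n), the estimate above can be reproduced from userspace with the uapi macros. A minimal sketch; the local nla_total_size() helper is an assumption standing in for the kernel function, not a uapi definition:

#include <stdio.h>
#include <linux/netlink.h>	/* NLMSG_ALIGN, NLA_ALIGN, NLA_HDRLEN */
#include <linux/rtnetlink.h>	/* struct rtmsg, rtnexthop, rta_mfc_stats */

/* Userspace stand-in for the kernel's nla_total_size() (assumption). */
static size_t nla_total_size(size_t payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	int maxvif = 2;
	size_t len = NLMSG_ALIGN(sizeof(struct rtmsg))
		   + nla_total_size(4)	/* RTA_TABLE */
		   + nla_total_size(4)	/* RTA_SRC */
		   + nla_total_size(4)	/* RTA_DST */
		   + nla_total_size(4)	/* RTA_IIF */
		   + nla_total_size(0)	/* RTA_MULTIPATH header */
		   + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
		   + nla_total_size(sizeof(struct rta_mfc_stats));

	printf("estimated resolved-entry message size: %zu bytes\n", len);
	return 0;
}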
2251
2252static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2253 int cmd)
2254{
2255 struct net *net = read_pnet(&mrt->net);
2256 struct sk_buff *skb;
2257 int err = -ENOBUFS;
2258
2259 skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
2260 GFP_ATOMIC);
51456b29 2261 if (!skb)
8cd3ac9f
ND
2262 goto errout;
2263
65886f43 2264 err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
8cd3ac9f
ND
2265 if (err < 0)
2266 goto errout;
2267
2268 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
2269 return;
2270
2271errout:
2272 kfree_skb(skb);
2273 if (err < 0)
2274 rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
2275}
2276
cb6a4e46
PM
2277static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2278{
2279 struct net *net = sock_net(skb->sk);
2280 struct mr_table *mrt;
2281 struct mfc_cache *mfc;
2282 unsigned int t = 0, s_t;
2283 unsigned int h = 0, s_h;
2284 unsigned int e = 0, s_e;
2285
2286 s_t = cb->args[0];
2287 s_h = cb->args[1];
2288 s_e = cb->args[2];
2289
a8c9486b 2290 rcu_read_lock();
cb6a4e46
PM
2291 ipmr_for_each_table(mrt, net) {
2292 if (t < s_t)
2293 goto next_table;
2294 if (t > s_t)
2295 s_h = 0;
2296 for (h = s_h; h < MFC_LINES; h++) {
a8c9486b 2297 list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
cb6a4e46
PM
2298 if (e < s_e)
2299 goto next_entry;
2300 if (ipmr_fill_mroute(mrt, skb,
15e47304 2301 NETLINK_CB(cb->skb).portid,
cb6a4e46 2302 cb->nlh->nlmsg_seq,
65886f43
ND
2303 mfc, RTM_NEWROUTE,
2304 NLM_F_MULTI) < 0)
cb6a4e46
PM
2305 goto done;
2306next_entry:
2307 e++;
2308 }
2309 e = s_e = 0;
2310 }
1eb99af5
ND
2311 spin_lock_bh(&mfc_unres_lock);
2312 list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
2313 if (e < s_e)
2314 goto next_entry2;
2315 if (ipmr_fill_mroute(mrt, skb,
2316 NETLINK_CB(cb->skb).portid,
2317 cb->nlh->nlmsg_seq,
65886f43
ND
2318 mfc, RTM_NEWROUTE,
2319 NLM_F_MULTI) < 0) {
1eb99af5
ND
2320 spin_unlock_bh(&mfc_unres_lock);
2321 goto done;
2322 }
2323next_entry2:
2324 e++;
2325 }
2326 spin_unlock_bh(&mfc_unres_lock);
2327 e = s_e = 0;
cb6a4e46
PM
2328 s_h = 0;
2329next_table:
2330 t++;
2331 }
2332done:
a8c9486b 2333 rcu_read_unlock();
cb6a4e46
PM
2334
2335 cb->args[2] = e;
2336 cb->args[1] = h;
2337 cb->args[0] = t;
2338
2339 return skb->len;
2340}
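
This dump handler is reached via the rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, ...) call in ip_mr_init() below. A hedged userspace sketch of the matching request: an RTM_GETROUTE dump with rtgen_family set to RTNL_FAMILY_IPMR over a NETLINK_ROUTE socket, with parsing of the RTM_NEWROUTE replies omitted:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>	/* RTNL_FAMILY_IPMR, RTM_GETROUTE */

int main(void)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct rtgenmsg gen;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtgenmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.gen.rtgen_family = RTNL_FAMILY_IPMR;	/* IPv4 MFC dump */

	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
		perror("sendto");
	/* ... recv() the NLMSG_DONE-terminated RTM_NEWROUTE replies here ... */
	close(fd);
	return 0;
}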
2341
e905a9ed 2342#ifdef CONFIG_PROC_FS
7ef8f65d
NA
2343/* The /proc interfaces to multicast routing :
2344 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
1da177e4
LT
2345 */
2346struct ipmr_vif_iter {
f6bb4514 2347 struct seq_net_private p;
f0ad0860 2348 struct mr_table *mrt;
1da177e4
LT
2349 int ct;
2350};
2351
f6bb4514
BT
2352static struct vif_device *ipmr_vif_seq_idx(struct net *net,
2353 struct ipmr_vif_iter *iter,
1da177e4
LT
2354 loff_t pos)
2355{
f0ad0860 2356 struct mr_table *mrt = iter->mrt;
0c12295a
PM
2357
2358 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
2359 if (!VIF_EXISTS(mrt, iter->ct))
1da177e4 2360 continue;
e905a9ed 2361 if (pos-- == 0)
0c12295a 2362 return &mrt->vif_table[iter->ct];
1da177e4
LT
2363 }
2364 return NULL;
2365}
2366
2367static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
ba93ef74 2368 __acquires(mrt_lock)
1da177e4 2369{
f0ad0860 2370 struct ipmr_vif_iter *iter = seq->private;
f6bb4514 2371 struct net *net = seq_file_net(seq);
f0ad0860
PM
2372 struct mr_table *mrt;
2373
2374 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
51456b29 2375 if (!mrt)
f0ad0860
PM
2376 return ERR_PTR(-ENOENT);
2377
2378 iter->mrt = mrt;
f6bb4514 2379
1da177e4 2380 read_lock(&mrt_lock);
f6bb4514 2381 return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
1da177e4
LT
2382 : SEQ_START_TOKEN;
2383}
2384
2385static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2386{
2387 struct ipmr_vif_iter *iter = seq->private;
f6bb4514 2388 struct net *net = seq_file_net(seq);
f0ad0860 2389 struct mr_table *mrt = iter->mrt;
1da177e4
LT
2390
2391 ++*pos;
2392 if (v == SEQ_START_TOKEN)
f6bb4514 2393 return ipmr_vif_seq_idx(net, iter, 0);
e905a9ed 2394
0c12295a
PM
2395 while (++iter->ct < mrt->maxvif) {
2396 if (!VIF_EXISTS(mrt, iter->ct))
1da177e4 2397 continue;
0c12295a 2398 return &mrt->vif_table[iter->ct];
1da177e4
LT
2399 }
2400 return NULL;
2401}
2402
2403static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
ba93ef74 2404 __releases(mrt_lock)
1da177e4
LT
2405{
2406 read_unlock(&mrt_lock);
2407}
2408
2409static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
2410{
f0ad0860
PM
2411 struct ipmr_vif_iter *iter = seq->private;
2412 struct mr_table *mrt = iter->mrt;
f6bb4514 2413
1da177e4 2414 if (v == SEQ_START_TOKEN) {
e905a9ed 2415 seq_puts(seq,
1da177e4
LT
2416 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
2417 } else {
2418 const struct vif_device *vif = v;
2419 const char *name = vif->dev ? vif->dev->name : "none";
2420
2421 seq_printf(seq,
2422 "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
0c12295a 2423 vif - mrt->vif_table,
e905a9ed 2424 name, vif->bytes_in, vif->pkt_in,
1da177e4
LT
2425 vif->bytes_out, vif->pkt_out,
2426 vif->flags, vif->local, vif->remote);
2427 }
2428 return 0;
2429}
2430
f690808e 2431static const struct seq_operations ipmr_vif_seq_ops = {
1da177e4
LT
2432 .start = ipmr_vif_seq_start,
2433 .next = ipmr_vif_seq_next,
2434 .stop = ipmr_vif_seq_stop,
2435 .show = ipmr_vif_seq_show,
2436};
2437
2438static int ipmr_vif_open(struct inode *inode, struct file *file)
2439{
f6bb4514
BT
2440 return seq_open_net(inode, file, &ipmr_vif_seq_ops,
2441 sizeof(struct ipmr_vif_iter));
1da177e4
LT
2442}
2443
9a32144e 2444static const struct file_operations ipmr_vif_fops = {
1da177e4
LT
2445 .owner = THIS_MODULE,
2446 .open = ipmr_vif_open,
2447 .read = seq_read,
2448 .llseek = seq_lseek,
f6bb4514 2449 .release = seq_release_net,
1da177e4
LT
2450};
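
The resulting file can be read like any other text file. A minimal sketch that simply echoes /proc/net/ip_mr_vif; the column layout is exactly the header printed by ipmr_vif_seq_show() above:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *fp = fopen("/proc/net/ip_mr_vif", "r");

	if (!fp) {
		perror("fopen");
		return 1;
	}
	/* First line is the column header, then one line per registered vif. */
	while (fgets(line, sizeof(line), fp))
		fputs(line, stdout);
	fclose(fp);
	return 0;
}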
2451
2452struct ipmr_mfc_iter {
f6bb4514 2453 struct seq_net_private p;
f0ad0860 2454 struct mr_table *mrt;
862465f2 2455 struct list_head *cache;
1da177e4
LT
2456 int ct;
2457};
2458
2459
f6bb4514
BT
2460static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
2461 struct ipmr_mfc_iter *it, loff_t pos)
1da177e4 2462{
f0ad0860 2463 struct mr_table *mrt = it->mrt;
1da177e4
LT
2464 struct mfc_cache *mfc;
2465
a8c9486b 2466 rcu_read_lock();
862465f2 2467 for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
0c12295a 2468 it->cache = &mrt->mfc_cache_array[it->ct];
a8c9486b 2469 list_for_each_entry_rcu(mfc, it->cache, list)
e905a9ed 2470 if (pos-- == 0)
1da177e4 2471 return mfc;
862465f2 2472 }
a8c9486b 2473 rcu_read_unlock();
1da177e4 2474
1da177e4 2475 spin_lock_bh(&mfc_unres_lock);
0c12295a 2476 it->cache = &mrt->mfc_unres_queue;
862465f2 2477 list_for_each_entry(mfc, it->cache, list)
e258beb2 2478 if (pos-- == 0)
1da177e4
LT
2479 return mfc;
2480 spin_unlock_bh(&mfc_unres_lock);
2481
2482 it->cache = NULL;
2483 return NULL;
2484}
2485
2486
2487static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
2488{
2489 struct ipmr_mfc_iter *it = seq->private;
f6bb4514 2490 struct net *net = seq_file_net(seq);
f0ad0860 2491 struct mr_table *mrt;
f6bb4514 2492
f0ad0860 2493 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
51456b29 2494 if (!mrt)
f0ad0860 2495 return ERR_PTR(-ENOENT);
f6bb4514 2496
f0ad0860 2497 it->mrt = mrt;
1da177e4
LT
2498 it->cache = NULL;
2499 it->ct = 0;
f6bb4514 2500 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
1da177e4
LT
2501 : SEQ_START_TOKEN;
2502}
2503
2504static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2505{
2506 struct mfc_cache *mfc = v;
2507 struct ipmr_mfc_iter *it = seq->private;
f6bb4514 2508 struct net *net = seq_file_net(seq);
f0ad0860 2509 struct mr_table *mrt = it->mrt;
1da177e4
LT
2510
2511 ++*pos;
2512
2513 if (v == SEQ_START_TOKEN)
f6bb4514 2514 return ipmr_mfc_seq_idx(net, seq->private, 0);
1da177e4 2515
862465f2
PM
2516 if (mfc->list.next != it->cache)
2517 return list_entry(mfc->list.next, struct mfc_cache, list);
e905a9ed 2518
0c12295a 2519 if (it->cache == &mrt->mfc_unres_queue)
1da177e4
LT
2520 goto end_of_list;
2521
0c12295a 2522 BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
1da177e4
LT
2523
2524 while (++it->ct < MFC_LINES) {
0c12295a 2525 it->cache = &mrt->mfc_cache_array[it->ct];
862465f2
PM
2526 if (list_empty(it->cache))
2527 continue;
2528 return list_first_entry(it->cache, struct mfc_cache, list);
1da177e4
LT
2529 }
2530
2531 /* exhausted cache_array, show unresolved */
a8c9486b 2532 rcu_read_unlock();
0c12295a 2533 it->cache = &mrt->mfc_unres_queue;
1da177e4 2534 it->ct = 0;
e905a9ed 2535
1da177e4 2536 spin_lock_bh(&mfc_unres_lock);
862465f2
PM
2537 if (!list_empty(it->cache))
2538 return list_first_entry(it->cache, struct mfc_cache, list);
1da177e4 2539
a8cb16dd 2540end_of_list:
1da177e4
LT
2541 spin_unlock_bh(&mfc_unres_lock);
2542 it->cache = NULL;
2543
2544 return NULL;
2545}
2546
2547static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
2548{
2549 struct ipmr_mfc_iter *it = seq->private;
f0ad0860 2550 struct mr_table *mrt = it->mrt;
1da177e4 2551
0c12295a 2552 if (it->cache == &mrt->mfc_unres_queue)
1da177e4 2553 spin_unlock_bh(&mfc_unres_lock);
0c12295a 2554 else if (it->cache == &mrt->mfc_cache_array[it->ct])
a8c9486b 2555 rcu_read_unlock();
1da177e4
LT
2556}
2557
2558static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
2559{
2560 int n;
2561
2562 if (v == SEQ_START_TOKEN) {
e905a9ed 2563 seq_puts(seq,
1da177e4
LT
2564 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
2565 } else {
2566 const struct mfc_cache *mfc = v;
2567 const struct ipmr_mfc_iter *it = seq->private;
f0ad0860 2568 const struct mr_table *mrt = it->mrt;
e905a9ed 2569
0eae88f3
ED
2570 seq_printf(seq, "%08X %08X %-3hd",
2571 (__force u32) mfc->mfc_mcastgrp,
2572 (__force u32) mfc->mfc_origin,
1ea472e2 2573 mfc->mfc_parent);
1da177e4 2574
0c12295a 2575 if (it->cache != &mrt->mfc_unres_queue) {
1ea472e2
BT
2576 seq_printf(seq, " %8lu %8lu %8lu",
2577 mfc->mfc_un.res.pkt,
2578 mfc->mfc_un.res.bytes,
2579 mfc->mfc_un.res.wrong_if);
132adf54 2580 for (n = mfc->mfc_un.res.minvif;
a8cb16dd 2581 n < mfc->mfc_un.res.maxvif; n++) {
0c12295a 2582 if (VIF_EXISTS(mrt, n) &&
cf958ae3
BT
2583 mfc->mfc_un.res.ttls[n] < 255)
2584 seq_printf(seq,
e905a9ed 2585 " %2d:%-3d",
1da177e4
LT
2586 n, mfc->mfc_un.res.ttls[n]);
2587 }
1ea472e2
BT
2588 } else {
2589 /* unresolved mfc_caches don't contain
2590 * pkt, bytes and wrong_if values
2591 */
2592 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
1da177e4
LT
2593 }
2594 seq_putc(seq, '\n');
2595 }
2596 return 0;
2597}
2598
f690808e 2599static const struct seq_operations ipmr_mfc_seq_ops = {
1da177e4
LT
2600 .start = ipmr_mfc_seq_start,
2601 .next = ipmr_mfc_seq_next,
2602 .stop = ipmr_mfc_seq_stop,
2603 .show = ipmr_mfc_seq_show,
2604};
2605
2606static int ipmr_mfc_open(struct inode *inode, struct file *file)
2607{
f6bb4514
BT
2608 return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
2609 sizeof(struct ipmr_mfc_iter));
1da177e4
LT
2610}
2611
9a32144e 2612static const struct file_operations ipmr_mfc_fops = {
1da177e4
LT
2613 .owner = THIS_MODULE,
2614 .open = ipmr_mfc_open,
2615 .read = seq_read,
2616 .llseek = seq_lseek,
f6bb4514 2617 .release = seq_release_net,
1da177e4 2618};
e905a9ed 2619#endif
1da177e4
LT
2620
2621#ifdef CONFIG_IP_PIMSM_V2
32613090 2622static const struct net_protocol pim_protocol = {
1da177e4 2623 .handler = pim_rcv,
403dbb97 2624 .netns_ok = 1,
1da177e4
LT
2625};
2626#endif
2627
7ef8f65d 2628/* Setup for IP multicast routing */
cf958ae3
BT
2629static int __net_init ipmr_net_init(struct net *net)
2630{
f0ad0860 2631 int err;
cf958ae3 2632
f0ad0860
PM
2633 err = ipmr_rules_init(net);
2634 if (err < 0)
cf958ae3 2635 goto fail;
f6bb4514
BT
2636
2637#ifdef CONFIG_PROC_FS
2638 err = -ENOMEM;
d4beaa66 2639 if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
f6bb4514 2640 goto proc_vif_fail;
d4beaa66 2641 if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
f6bb4514
BT
2642 goto proc_cache_fail;
2643#endif
2bb8b26c
BT
2644 return 0;
2645
f6bb4514
BT
2646#ifdef CONFIG_PROC_FS
2647proc_cache_fail:
ece31ffd 2648 remove_proc_entry("ip_mr_vif", net->proc_net);
f6bb4514 2649proc_vif_fail:
f0ad0860 2650 ipmr_rules_exit(net);
f6bb4514 2651#endif
cf958ae3
BT
2652fail:
2653 return err;
2654}
2655
2656static void __net_exit ipmr_net_exit(struct net *net)
2657{
f6bb4514 2658#ifdef CONFIG_PROC_FS
ece31ffd
G
2659 remove_proc_entry("ip_mr_cache", net->proc_net);
2660 remove_proc_entry("ip_mr_vif", net->proc_net);
f6bb4514 2661#endif
f0ad0860 2662 ipmr_rules_exit(net);
cf958ae3
BT
2663}
2664
2665static struct pernet_operations ipmr_net_ops = {
2666 .init = ipmr_net_init,
2667 .exit = ipmr_net_exit,
2668};
e905a9ed 2669
03d2f897 2670int __init ip_mr_init(void)
1da177e4 2671{
03d2f897
WC
2672 int err;
2673
1da177e4
LT
2674 mrt_cachep = kmem_cache_create("ip_mrt_cache",
2675 sizeof(struct mfc_cache),
a8c9486b 2676 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
20c2df83 2677 NULL);
03d2f897 2678
cf958ae3
BT
2679 err = register_pernet_subsys(&ipmr_net_ops);
2680 if (err)
2681 goto reg_pernet_fail;
2682
03d2f897
WC
2683 err = register_netdevice_notifier(&ip_mr_notifier);
2684 if (err)
2685 goto reg_notif_fail;
403dbb97
TG
2686#ifdef CONFIG_IP_PIMSM_V2
2687 if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
058bd4d2 2688 pr_err("%s: can't add PIM protocol\n", __func__);
403dbb97
TG
2689 err = -EAGAIN;
2690 goto add_proto_fail;
2691 }
2692#endif
c7ac8679
GR
2693 rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
2694 NULL, ipmr_rtm_dumproute, NULL);
03d2f897 2695 return 0;
f6bb4514 2696
403dbb97
TG
2697#ifdef CONFIG_IP_PIMSM_V2
2698add_proto_fail:
2699 unregister_netdevice_notifier(&ip_mr_notifier);
2700#endif
c3e38896 2701reg_notif_fail:
cf958ae3
BT
2702 unregister_pernet_subsys(&ipmr_net_ops);
2703reg_pernet_fail:
c3e38896 2704 kmem_cache_destroy(mrt_cachep);
03d2f897 2705 return err;
1da177e4 2706}