/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */

#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif

struct mr_table {
	struct list_head	list;
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	u32			id;
	struct sock __rcu	*mroute_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc_unres_queue;
	struct list_head	mfc_cache_array[MFC_LINES];
	struct vif_device	vif_table[MAXVIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	int			mroute_do_assert;
	int			mroute_do_pim;
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
	int			mroute_reg_vif_num;
#endif
};

struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to Alan's original scheme here. The hash table of resolved
 * entries is changed only in process context and is protected with the
 * weak lock mrt_lock. The queue of unresolved entries is protected with
 * the strong spinlock mfc_unres_lock.
 *
 * The data path is then entirely free of exclusive locks.
 */
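/* A sketch of the resulting access pattern (illustration only): readers
 * on the data path do
 *
 *	rcu_read_lock();
 *	c = ipmr_cache_find(mrt, origin, mcastgrp);
 *	... use c ...
 *	rcu_read_unlock();
 *
 * while writers, already serialized by rtnl_lock, publish and unpublish
 * entries with list_add_rcu()/list_del_rcu() and defer freeing to an RCU
 * grace period via call_rcu() (see ipmr_cache_free() below).
 */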

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static int ip_mr_forward(struct net *net, struct mr_table *mrt,
			 struct sk_buff *skb, struct mfc_cache *cache,
			 int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	struct ipmr_result res;
	struct fib_lookup_arg arg = { .result = &res, };
	int err;

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ipmr_get_table(rule->fr_net, rule->table);
	if (mrt == NULL)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}

static const struct fib_rules_ops __net_initdata ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	kfree(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		kfree(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	return net->ipv4.mrt ? 0 : -ENOMEM;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	kfree(net->ipv4.mrt);
}
#endif

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;
	unsigned int i;

	mrt = ipmr_get_table(net, id);
	if (mrt != NULL)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (mrt == NULL)
		return NULL;
	write_pnet(&mrt->net, net);
	mrt->id = id;

	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IP_PIMSM
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

static
struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (in_dev == NULL)
				goto failure;

			ipv4_devconf_setall(in_dev);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}

#ifdef CONFIG_IP_PIMSM

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	struct in_device *in_dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, reg_vif_setup);

	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		goto failure;
	}

	ipv4_devconf_setall(in_dev);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	rcu_read_unlock();

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 *	Delete a VIF entry
 *	@notify: Set to 1 if the caller is a notifier_call
 */

static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IP_PIMSM
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = NLMSG_DATA(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}


/* Timer process for the unresolved queue. */

static void ipmr_expire_process(unsigned long arg)
{
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10*HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
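/* Worked example (hypothetical values): with ttls = { [1] = 1, [3] = 64 }
 * and all other slots 0, the entry forwards on vif 1 for packets with
 * TTL > 1 and on vif 3 for packets with TTL > 64; minvif and maxvif
 * become 1 and 4, bounding the scan in ip_mr_forward() to that range.
 */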

static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
	case VIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;

	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && __in_dev_get_rtnl(dev) == NULL) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */

	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IP_PIMSM
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi+1 > mrt->maxvif)
		mrt->maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	int line = MFC_HASH(mcastgrp, origin);
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
			return c;
	}
	return NULL;
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c)
		c->mfc_un.res.minvif = MAXVIFS;
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10*HZ;
	}
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = NLMSG_DATA(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else {
			ip_mr_forward(net, mrt, skb, c, 0);
		}
	}
}

/*
 *	Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */
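/* For illustration only (assumed daemon-side code, not part of this file):
 * the upcall is read from the daemon's raw IGMP socket as a struct igmpmsg
 * overlaying the IP header, roughly
 *
 *	char buf[2048];
 *	struct igmpmsg *im = (struct igmpmsg *)buf;
 *
 *	if (recv(mrouter_fd, buf, sizeof(buf), 0) > 0 &&
 *	    im->im_msgtype == IGMPMSG_NOCACHE)
 *		resolve (im->im_src, im->im_dst), then MRT_ADD_MFC;
 */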

static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sock *mroute_sk;
	int ret;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else
#endif
	{

		/* Copy the IP header */

		skb->network_header = skb->tail;
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));

		/* Add our header */

		igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
		igmp->type =
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (mroute_sk == NULL) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Deliver to mrouted */

	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}

/*
 *	Queue a packet for resolution. It gets a locked cache entry!
 */

static int
ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */

		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/* Reflect first query at mrouted. */

		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/* See if we can append the packet */

	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC cache manipulation by user space mroute daemon
 */
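/* For illustration only (assumed userspace code): the call that reaches
 * ipmr_mfc_add() below might look like
 *
 *	struct mfcctl mc = {
 *		.mfcc_origin.s_addr   = inet_addr("10.0.0.1"),
 *		.mfcc_mcastgrp.s_addr = inet_addr("239.1.1.1"),
 *		.mfcc_parent          = 0,
 *		.mfcc_ttls            = { [1] = 1 },
 *	};
 *
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 */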

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
{
	int line;
	struct mfc_cache *c, *next;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			list_del_rcu(&c->list);

			ipmr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock)
{
	bool found = false;
	int line;
	struct mfc_cache *uc, *c;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (c == NULL)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

	/*
	 * Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt)
{
	int i;
	LIST_HEAD(list);
	struct mfc_cache *c, *next;

	/* Shut down all active vif entries */

	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
			vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */

	for (i = 0; i < MFC_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
				continue;
			list_del_rcu(&c->list);
			ipmr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			ipmr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

/* Called from ip_ra_control(), before an RCU grace period;
 * we don't need to call synchronize_rcu() here.
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	rtnl_lock();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt);
		}
	}
	rtnl_unlock();
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
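/* Minimal daemon lifecycle, for illustration only (assumed userspace code):
 *
 *	int one = 1;
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *	... MRT_ADD_VIF / MRT_ADD_MFC, read upcalls ...
 *	setsockopt(fd, IPPROTO_IP, MRT_DONE, NULL, 0);
 *
 * MRT_INIT below enforces exactly this socket type: SOCK_RAW, IPPROTO_IGMP.
 */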

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret;
	struct vifctl vif;
	struct mfcctl mfc;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->inet_num != IPPROTO_IGMP)
			return -EOPNOTSUPP;
		if (optlen != sizeof(int))
			return -ENOPROTOOPT;

		rtnl_lock();
		if (rtnl_dereference(mrt->mroute_sk)) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk))
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		rtnl_unlock();
		return ret;

		/*
		 * Manipulate the forwarding caches. These live
		 * in a sort of kernel/user symbiosis.
		 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		if (optlen != sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT_DEL_MFC)
			ret = ipmr_mfc_delete(mrt, &mfc);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk));
		rtnl_unlock();
		return ret;
		/*
		 * Control PIM assert.
		 */
	case MRT_ASSERT:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = (v) ? 1 : 0;
		return 0;
	}
#ifdef CONFIG_IP_PIMSM
	case MRT_PIM:
	{
		int v;

		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = (v) ? 1 : 0;

		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	case MRT_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;

		rtnl_lock();
		ret = 0;
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			if (!ipmr_new_table(net, v))
				ret = -ENOMEM;
			raw_sk(sk)->ipmr_table = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 * Spurious command, or MRT_VERSION which you cannot
	 * set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	    optname != MRT_PIM &&
#endif
	    optname != MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
		return -EFAULT;

	if (olr < 0)
		return -EINVAL;
	olr = min_t(unsigned int, olr, sizeof(int));

	if (put_user(olr, optlen))
		return -EFAULT;
	if (optname == MRT_VERSION)
		val = 0x0305;
#ifdef CONFIG_IP_PIMSM
	else if (optname == MRT_PIM)
		val = mrt->mroute_do_pim;
#endif
	else
		val = mrt->mroute_do_assert;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif


static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}


static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/*
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */
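/* Resulting packet layout (sketch):
 *
 *	before:	| inner IP | payload |
 *	after:	| outer IP (proto = IPPROTO_IPIP) | inner IP | payload |
 *
 * The outer header copies TOS and TTL from the inner one; saddr/daddr
 * are the tunnel endpoints supplied by the caller.
 */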

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(iph, skb_dst(skb), NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

static inline int ipmr_forward_finish(struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}

/*
 *	Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int    encap = 0;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}
#endif

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow us to send ICMP here, so oversized packets
		 * silently disappear into a black hole.
		 */

		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but also after forwarding on all output
	 * interfaces. Clearly, if the mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program is joined on.
	 * If we do not do this, the program will have to join on all
	 * interfaces. On the other hand, a multihomed host (or router, but
	 * not mrouter) cannot join on more than one interface - that would
	 * result in receiving multiple packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */

static int ip_mr_forward(struct net *net, struct mr_table *mrt,
			 struct sk_buff *skb, struct mfc_cache *cache,
			 int local)
{
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != skb->dev) {
		int true_vifi;

		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons are
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for
			 * (S,G) entries whose default multicast route points
			 * to the wrong oif. In any case, it is not a good
			 * idea to run multicasting applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;
		true_vifi = ipmr_find_vif(mrt, skb->dev);

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts when switching from RPT to SPT,
		     * so we cannot check that the packet arrived on an oif.
		     * It is bad, but otherwise we would need to move a pretty
		     * large chunk of pimd into the kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/*
	 * Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					ipmr_queue_xmit(net, mrt, skb2, cache,
							psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
		} else {
			ipmr_queue_xmit(net, mrt, skb, cache, psend);
			return 0;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
	return 0;
}

static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = rt->rt_oif,
		.flowi4_iif = rt->rt_iif,
		.flowi4_mark = rt->rt_mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}

/*
 *	Multicast packets for forwarding arrive here
 *	Called with rcu_read_lock();
 */

int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;

	/* Packet is looped back after forward, it should not be
	 * forwarded second time, but still can be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option into IGMP packets destined to routable
			 * groups. It is very bad, because it means
			 * that we can forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/*
	 * No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (skb2 == NULL)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}

#ifdef CONFIG_IP_PIMSM
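/* Register packet layout handled below (sketch):
 *
 *	| outer IP | PIM header (pimlen bytes) | encapsulated IP | data |
 *
 * After validation, the outer IP and PIM headers are stripped and the
 * inner packet is re-injected on the pimreg device via skb_tunnel_rx()
 * and netif_rx().
 */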
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/*
	 * Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;
	skb->pkt_type = PACKET_HOST;

	skb_tunnel_rx(skb, reg_dev);

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#endif

#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
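
/* Netlink attribute layout produced below (sketch):
 *
 *	RTA_IIF		ifindex of the parent (input) vif
 *	RTA_MULTIPATH	array of struct rtnexthop, one per output vif,
 *			with rtnh_hops carrying the TTL threshold
 */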

static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS)
		return -ENOENT;

	if (VIF_EXISTS(mrt, c->mfc_parent))
		RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, int nowait)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (mrt == NULL)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);

	if (cache == NULL) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		if (nowait) {
			rcu_read_unlock();
			return -EAGAIN;
		}

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}

static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 pid, u32 seq, struct mfc_cache *c)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;

	nlh = nlmsg_put(skb, pid, seq, RTM_NEWROUTE, sizeof(*rtm), NLM_F_MULTI);
	if (nlh == NULL)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len  = 32;
	rtm->rtm_src_len  = 32;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type     = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = RTPROT_UNSPEC;
	rtm->rtm_flags    = 0;

	if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int h = 0, s_h;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_h = cb->args[1];
	s_e = cb->args[2];

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		if (t > s_t)
			s_h = 0;
		for (h = s_h; h < MFC_LINES; h++) {
			list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
				if (e < s_e)
					goto next_entry;
				if (ipmr_fill_mroute(mrt, skb,
						     NETLINK_CB(cb->skb).pid,
						     cb->nlh->nlmsg_seq,
						     mfc) < 0)
					goto done;
next_entry:
				e++;
			}
			e = s_e = 0;
		}
		s_h = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[2] = e;
	cb->args[1] = h;
	cb->args[0] = t;

	return skb->len;
}

#ifdef CONFIG_PROC_FS
/*
 *	The /proc interfaces to multicast routing:
 *	/proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
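/* Example /proc/net/ip_mr_vif contents (values are illustrative only):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0           15000     100     30000     200 00000 0A000001 00000000
 */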
2194struct ipmr_vif_iter {
2195 struct seq_net_private p;
2196 struct mr_table *mrt;
2197 int ct;
2198};
2199
2200static struct vif_device *ipmr_vif_seq_idx(struct net *net,
2201 struct ipmr_vif_iter *iter,
2202 loff_t pos)
2203{
2204 struct mr_table *mrt = iter->mrt;
2205
2206 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
2207 if (!VIF_EXISTS(mrt, iter->ct))
2208 continue;
2209 if (pos-- == 0)
2210 return &mrt->vif_table[iter->ct];
2211 }
2212 return NULL;
2213}
2214
2215static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
2216 __acquires(mrt_lock)
2217{
2218 struct ipmr_vif_iter *iter = seq->private;
2219 struct net *net = seq_file_net(seq);
2220 struct mr_table *mrt;
2221
2222 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2223 if (mrt == NULL)
2224 return ERR_PTR(-ENOENT);
2225
2226 iter->mrt = mrt;
2227
2228 read_lock(&mrt_lock);
2229	return *pos ? ipmr_vif_seq_idx(net, iter, *pos - 1)
2230 : SEQ_START_TOKEN;
2231}
2232
2233static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2234{
2235 struct ipmr_vif_iter *iter = seq->private;
2236 struct net *net = seq_file_net(seq);
2237 struct mr_table *mrt = iter->mrt;
2238
2239 ++*pos;
2240 if (v == SEQ_START_TOKEN)
2241 return ipmr_vif_seq_idx(net, iter, 0);
2242
2243 while (++iter->ct < mrt->maxvif) {
2244 if (!VIF_EXISTS(mrt, iter->ct))
2245 continue;
2246 return &mrt->vif_table[iter->ct];
2247 }
2248 return NULL;
2249}
2250
2251static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
2252 __releases(mrt_lock)
2253{
2254 read_unlock(&mrt_lock);
2255}
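
/* start() takes mrt_lock and stop() drops it, so one full read of
 * /proc/net/ip_mr_vif sees a consistent vif table; next() and show()
 * always run with the lock held.
 */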
2256
2257static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
2258{
2259 struct ipmr_vif_iter *iter = seq->private;
2260 struct mr_table *mrt = iter->mrt;
2261
2262 if (v == SEQ_START_TOKEN) {
2263 seq_puts(seq,
2264 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
2265 } else {
2266 const struct vif_device *vif = v;
2267 const char *name = vif->dev ? vif->dev->name : "none";
2268
2269 seq_printf(seq,
2270		   "%2zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
2271 vif - mrt->vif_table,
2272 name, vif->bytes_in, vif->pkt_in,
2273 vif->bytes_out, vif->pkt_out,
2274 vif->flags, vif->local, vif->remote);
2275 }
2276 return 0;
2277}
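
/* A /proc/net/ip_mr_vif line, with illustrative values (the local and
 * remote addresses are printed as raw hex):
 *
 *  0 eth0            123      10      456      20 00000 C0A80001 00000000
 */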
2278
2279static const struct seq_operations ipmr_vif_seq_ops = {
2280 .start = ipmr_vif_seq_start,
2281 .next = ipmr_vif_seq_next,
2282 .stop = ipmr_vif_seq_stop,
2283 .show = ipmr_vif_seq_show,
2284};
2285
2286static int ipmr_vif_open(struct inode *inode, struct file *file)
2287{
2288 return seq_open_net(inode, file, &ipmr_vif_seq_ops,
2289 sizeof(struct ipmr_vif_iter));
2290}
2291
2292static const struct file_operations ipmr_vif_fops = {
2293 .owner = THIS_MODULE,
2294 .open = ipmr_vif_open,
2295 .read = seq_read,
2296 .llseek = seq_lseek,
2297 .release = seq_release_net,
2298};
2299
2300struct ipmr_mfc_iter {
2301 struct seq_net_private p;
2302 struct mr_table *mrt;
2303 struct list_head *cache;
2304 int ct;
2305};
2306
2307
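/* MFC dumping is a two-phase walk: resolved entries live in the
 * RCU-protected hash array, unresolved ones on mfc_unres_queue under
 * mfc_unres_lock.  On success ipmr_mfc_seq_idx() returns with the
 * matching protection still held, and it->cache records which list we
 * are on so ipmr_mfc_seq_stop() can release the right one.
 */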
2308static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
2309 struct ipmr_mfc_iter *it, loff_t pos)
2310{
2311 struct mr_table *mrt = it->mrt;
2312 struct mfc_cache *mfc;
2313
2314 rcu_read_lock();
2315 for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
2316 it->cache = &mrt->mfc_cache_array[it->ct];
2317 list_for_each_entry_rcu(mfc, it->cache, list)
2318 if (pos-- == 0)
2319 return mfc;
2320 }
2321 rcu_read_unlock();
2322
2323 spin_lock_bh(&mfc_unres_lock);
2324 it->cache = &mrt->mfc_unres_queue;
2325 list_for_each_entry(mfc, it->cache, list)
2326 if (pos-- == 0)
2327 return mfc;
2328 spin_unlock_bh(&mfc_unres_lock);
2329
2330 it->cache = NULL;
2331 return NULL;
2332}
2333
2334
2335static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
2336{
2337 struct ipmr_mfc_iter *it = seq->private;
2338 struct net *net = seq_file_net(seq);
2339 struct mr_table *mrt;
2340
2341 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2342 if (mrt == NULL)
2343 return ERR_PTR(-ENOENT);
2344
2345 it->mrt = mrt;
2346 it->cache = NULL;
2347 it->ct = 0;
2348	return *pos ? ipmr_mfc_seq_idx(net, it, *pos - 1)
2349 : SEQ_START_TOKEN;
2350}
2351
2352static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2353{
2354 struct mfc_cache *mfc = v;
2355 struct ipmr_mfc_iter *it = seq->private;
2356 struct net *net = seq_file_net(seq);
2357 struct mr_table *mrt = it->mrt;
2358
2359 ++*pos;
2360
2361 if (v == SEQ_START_TOKEN)
2362		return ipmr_mfc_seq_idx(net, it, 0);
2363
2364 if (mfc->list.next != it->cache)
2365 return list_entry(mfc->list.next, struct mfc_cache, list);
2366
2367 if (it->cache == &mrt->mfc_unres_queue)
2368 goto end_of_list;
2369
2370 BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
2371
2372 while (++it->ct < MFC_LINES) {
2373 it->cache = &mrt->mfc_cache_array[it->ct];
2374 if (list_empty(it->cache))
2375 continue;
2376 return list_first_entry(it->cache, struct mfc_cache, list);
2377 }
2378
2379 /* exhausted cache_array, show unresolved */
2380 rcu_read_unlock();
2381 it->cache = &mrt->mfc_unres_queue;
2382 it->ct = 0;
2383
2384 spin_lock_bh(&mfc_unres_lock);
2385 if (!list_empty(it->cache))
2386 return list_first_entry(it->cache, struct mfc_cache, list);
2387
2388end_of_list:
2389 spin_unlock_bh(&mfc_unres_lock);
2390 it->cache = NULL;
2391
2392 return NULL;
2393}
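
/* Note the handoff above: once the hash array is exhausted the
 * iterator drops rcu_read_lock() and takes mfc_unres_lock before
 * switching lists, so exactly one protection is held at any time.
 */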
2394
2395static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
2396{
2397 struct ipmr_mfc_iter *it = seq->private;
2398 struct mr_table *mrt = it->mrt;
2399
2400 if (it->cache == &mrt->mfc_unres_queue)
2401 spin_unlock_bh(&mfc_unres_lock);
2402 else if (it->cache == &mrt->mfc_cache_array[it->ct])
2403 rcu_read_unlock();
2404}
2405
2406static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
2407{
2408 int n;
2409
2410 if (v == SEQ_START_TOKEN) {
2411 seq_puts(seq,
2412 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
2413 } else {
2414 const struct mfc_cache *mfc = v;
2415 const struct ipmr_mfc_iter *it = seq->private;
2416 const struct mr_table *mrt = it->mrt;
2417
2418 seq_printf(seq, "%08X %08X %-3hd",
2419 (__force u32) mfc->mfc_mcastgrp,
2420 (__force u32) mfc->mfc_origin,
2421 mfc->mfc_parent);
2422
2423 if (it->cache != &mrt->mfc_unres_queue) {
2424 seq_printf(seq, " %8lu %8lu %8lu",
2425 mfc->mfc_un.res.pkt,
2426 mfc->mfc_un.res.bytes,
2427 mfc->mfc_un.res.wrong_if);
2428 for (n = mfc->mfc_un.res.minvif;
2429 n < mfc->mfc_un.res.maxvif; n++) {
2430 if (VIF_EXISTS(mrt, n) &&
2431 mfc->mfc_un.res.ttls[n] < 255)
2432 seq_printf(seq,
2433 " %2d:%-3d",
2434 n, mfc->mfc_un.res.ttls[n]);
2435 }
2436 } else {
2437 /* unresolved mfc_caches don't contain
2438 * pkt, bytes and wrong_if values
2439 */
2440 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
2441 }
2442 seq_putc(seq, '\n');
2443 }
2444 return 0;
2445}
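
/* A /proc/net/ip_mr_cache line, with illustrative values (group and
 * origin are raw hex, followed by one "vif:ttl" pair per output vif):
 *
 * E1010101 C0A80001 1             12     1440        0  2:1
 */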
2446
2447static const struct seq_operations ipmr_mfc_seq_ops = {
2448 .start = ipmr_mfc_seq_start,
2449 .next = ipmr_mfc_seq_next,
2450 .stop = ipmr_mfc_seq_stop,
2451 .show = ipmr_mfc_seq_show,
2452};
2453
2454static int ipmr_mfc_open(struct inode *inode, struct file *file)
2455{
2456 return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
2457 sizeof(struct ipmr_mfc_iter));
2458}
2459
2460static const struct file_operations ipmr_mfc_fops = {
2461 .owner = THIS_MODULE,
2462 .open = ipmr_mfc_open,
2463 .read = seq_read,
2464 .llseek = seq_lseek,
2465 .release = seq_release_net,
2466};
2467#endif
2468
2469#ifdef CONFIG_IP_PIMSM_V2
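/* PIMv2 Register handler; hooked to IPPROTO_PIM by ip_mr_init() below. */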
2470static const struct net_protocol pim_protocol = {
2471 .handler = pim_rcv,
2472 .netns_ok = 1,
2473};
2474#endif
2475
2476
2477/*
2478 * Setup for IP multicast routing
2479 */
2480static int __net_init ipmr_net_init(struct net *net)
2481{
2482 int err;
2483
2484 err = ipmr_rules_init(net);
2485 if (err < 0)
2486 goto fail;
2487
2488#ifdef CONFIG_PROC_FS
2489 err = -ENOMEM;
2490 if (!proc_net_fops_create(net, "ip_mr_vif", 0, &ipmr_vif_fops))
2491 goto proc_vif_fail;
2492 if (!proc_net_fops_create(net, "ip_mr_cache", 0, &ipmr_mfc_fops))
2493 goto proc_cache_fail;
2494#endif
2495 return 0;
2496
2497#ifdef CONFIG_PROC_FS
2498proc_cache_fail:
2499 proc_net_remove(net, "ip_mr_vif");
2500proc_vif_fail:
2501 ipmr_rules_exit(net);
2502#endif
2503fail:
2504 return err;
2505}
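
/* The labels above unwind only what was created: losing "ip_mr_cache"
 * removes "ip_mr_vif" first, then the per-net rules.
 */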
2506
2507static void __net_exit ipmr_net_exit(struct net *net)
2508{
2509#ifdef CONFIG_PROC_FS
2510 proc_net_remove(net, "ip_mr_cache");
2511 proc_net_remove(net, "ip_mr_vif");
2512#endif
2513 ipmr_rules_exit(net);
2514}
2515
2516static struct pernet_operations ipmr_net_ops = {
2517 .init = ipmr_net_init,
2518 .exit = ipmr_net_exit,
2519};
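
/* Every network namespace gets its own rule set, tables and /proc
 * entries; ipmr_net_exit() tears them down in reverse creation order.
 */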
2520
2521int __init ip_mr_init(void)
2522{
2523 int err;
2524
2525 mrt_cachep = kmem_cache_create("ip_mrt_cache",
2526 sizeof(struct mfc_cache),
2527 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
2528 NULL);
2529 if (!mrt_cachep)
2530 return -ENOMEM;
2531
2532 err = register_pernet_subsys(&ipmr_net_ops);
2533 if (err)
2534 goto reg_pernet_fail;
2535
2536 err = register_netdevice_notifier(&ip_mr_notifier);
2537 if (err)
2538 goto reg_notif_fail;
2539#ifdef CONFIG_IP_PIMSM_V2
2540 if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
2541 pr_err("%s: can't add PIM protocol\n", __func__);
2542 err = -EAGAIN;
2543 goto add_proto_fail;
2544 }
2545#endif
2546 rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
2547 NULL, ipmr_rtm_dumproute, NULL);
2548 return 0;
2549
2550#ifdef CONFIG_IP_PIMSM_V2
2551add_proto_fail:
2552 unregister_netdevice_notifier(&ip_mr_notifier);
2553#endif
2554reg_notif_fail:
2555 unregister_pernet_subsys(&ipmr_net_ops);
2556reg_pernet_fail:
2557 kmem_cache_destroy(mrt_cachep);
2558 return err;
2559}
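
/* The error unwinding mirrors registration order: each failure label
 * undoes exactly the steps that had already succeeded, finishing with
 * kmem_cache_destroy() of the mfc cache.
 */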