/*
 *	net/ipv4/ipmr.c: IP multicast routing support for mrouted 3.6/3.8
 *
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */

#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif

struct mr_table {
	struct list_head	list;
	possible_net_t		net;
	u32			id;
	struct sock __rcu	*mroute_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc_unres_queue;
	struct list_head	mfc_cache_array[MFC_LINES];
	struct vif_device	vif_table[MAXVIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
	bool			mroute_do_pim;
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
	int			mroute_reg_vif_num;
#endif
};

struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and is protected by the
 * weak rwlock mrt_lock. The queue of unresolved entries is protected
 * by the strong spinlock mfc_unres_lock.
 *
 * Thus the data path is entirely free of exclusive locks.
 */
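/* Illustrative sketch (an editor's summary, not original source): the
 * pattern the scheme above implies.  Data-path readers use RCU (or
 * read_lock(&mrt_lock) for the vif table); control-path writers run in
 * process context and take write_lock_bh():
 *
 *	rcu_read_lock();
 *	c = ipmr_cache_find(mrt, saddr, daddr);	(lock-free data path)
 *	...
 *	rcu_read_unlock();
 *
 *	write_lock_bh(&mrt_lock);		(exclusive control path)
 *	v->dev = dev;
 *	write_unlock_bh(&mrt_lock);
 */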
static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
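/* Editor's note: with CONFIG_IP_MROUTE_MULTIPLE_TABLES, the table used
 * for a packet is chosen by the fib-rules lookup above, keyed on the
 * usual flowi4 selectors (iif, oif, mark, tos).  Assuming an iproute2
 * build with multicast rule support, a daemon bound to table 100 via
 * MRT_TABLE might be fed packets from eth0 with something like:
 *
 *	ip mrule add iif eth0 lookup 100
 *
 * The exact userspace command is an assumption for illustration; the
 * kernel only sees the resulting FR_ACT_TO_TBL rule.
 */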
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ipmr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.default_pref	= fib_default_rule_pref,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (!mrt) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	ipmr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
	rtnl_unlock();
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	return net->ipv4.mrt ? 0 : -ENOMEM;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	rtnl_lock();
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
	rtnl_unlock();
}
#endif

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;
	unsigned int i;

	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return NULL;
	write_pnet(&mrt->net, net);
	mrt->id = id;

	/* Forwarding cache */
	for (i = 0; i < MFC_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

#ifdef CONFIG_IP_PIMSM
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}

static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt);
	kfree(mrt);
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

static
struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (!in_dev)
				goto failure;

			ipv4_devconf_setall(in_dev);
			neigh_parms_data_state_setall(in_dev->arp_parms);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
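/* Editor's note on the function above: the DVMRP tunnel vif is created
 * by driving the generic tunl0 device through its own SIOCADDTUNNEL
 * ioctl from kernel space.  The set_fs(KERNEL_DS) dance is needed
 * because the ioctl handler expects ifr_ifru.ifru_data to be a user
 * pointer, while here it points at an on-stack ip_tunnel_parm.  On
 * success the new device comes back named "dvmrp<vifi>".
 */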
#ifdef CONFIG_IP_PIMSM

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	struct in_device *in_dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		goto failure;
	}

	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	rcu_read_unlock();

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif

/**
 * vif_delete - Delete a VIF entry
 * @notify: Set to 1, if the caller is a notifier_call
 */

static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IP_PIMSM
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev),
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}


/* Timer process for the unresolved queue. */

static void ipmr_expire_process(unsigned long arg)
{
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10*HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}
/* Fill the oifs list. Called under a write-locked mrt_lock. */

static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
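/* Worked example (editor's illustration): given a daemon-supplied ttls[]
 * beginning {1, 255, 3, 0, ...}, vif 1 (ttl 255) and vif 3 (ttl 0) are
 * treated as "not on the oif list", so the loop above yields minvif = 0
 * and maxvif = 3.  A packet is later forwarded out vif N only when its
 * own TTL exceeds ttls[N] (see the ip_hdr(skb)->ttl checks in
 * ip_mr_forward()).
 */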
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is the VIF busy? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
	case VIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;

	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
				    &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */

	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IP_PIMSM
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi+1 > mrt->maxvif)
		mrt->maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	int line = MFC_HASH(mcastgrp, origin);
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
			return c;
	}
	return NULL;
}

/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
						    int vifi)
{
	int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
	struct mfc_cache *c;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
		if (c->mfc_origin == htonl(INADDR_ANY) &&
		    c->mfc_mcastgrp == htonl(INADDR_ANY) &&
		    c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
	struct mfc_cache *c, *proxy;

	if (mcastgrp == htonl(INADDR_ANY))
		goto skip;

	list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
		if (c->mfc_origin == htonl(INADDR_ANY) &&
		    c->mfc_mcastgrp == mcastgrp) {
			if (c->mfc_un.res.ttls[vifi] < 255)
				return c;

			/* It's ok if the vifi is part of the static tree */
			proxy = ipmr_cache_find_any_parent(mrt,
							   c->mfc_parent);
			if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
				return c;
		}

skip:
	return ipmr_cache_find_any_parent(mrt, vifi);
}
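/* Editor's summary of the lookup order used on the input path: an exact
 * (S,G) match via ipmr_cache_find() is tried first; only when that
 * fails does ipmr_cache_find_any() look for a (*,G) entry, and it in
 * turn falls back to a (*,*) proxy entry.  In all cases
 * ttls[vifi] < 255 is what marks vifi as part of the static tree.
 */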
/*
 * Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c)
		c->mfc_un.res.minvif = MAXVIFS;
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10*HZ;
	}
	return c;
}

/*
 * A cache entry has moved from the unresolved queue into the
 * resolved state: replay its pending packets.
 */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = nlmsg_data(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			ip_mr_forward(net, mrt, skb, c, 0);
		}
	}
}

/*
 * Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 * expects the following bizarre scheme.
 *
 * Called under mrt_lock.
 */

static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sock *mroute_sk;
	int ret;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else
#endif
	{

		/* Copy the IP header */

		skb_set_network_header(skb, skb->len);
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));

		/* Add our header */

		igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
		igmp->type =
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Deliver to mrouted */

	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
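/* What the daemon sees (editor's illustration, with hypothetical
 * userspace names): each report queued above arrives on mrouted's raw
 * IGMP socket as a struct igmpmsg (linux/mroute.h) overlaid on the IP
 * header, with im_mbz == 0 distinguishing it from real IGMP.  A minimal
 * read loop on an MRT_INIT'ed socket fd could look like:
 *
 *	char buf[2048];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	struct igmpmsg *m = (struct igmpmsg *)buf;
 *	if (n >= (ssize_t)sizeof(*m) && m->im_mbz == 0)
 *		handle_upcall(m->im_msgtype, m->im_vif, m->im_src, m->im_dst);
 *
 * where handle_upcall() is a hypothetical daemon routine dispatching on
 * IGMPMSG_NOCACHE, IGMPMSG_WRONGVIF or IGMPMSG_WHOLEPKT.
 */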
/*
 * Queue a packet for resolution, creating the unresolved cache entry
 * under mfc_unres_lock if necessary.
 */

static int
ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */

		c->mfc_parent = -1;
		c->mfc_origin = iph->saddr;
		c->mfc_mcastgrp = iph->daddr;

		/* Report the cache miss to mrouted with the first packet. */

		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed, throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/* See if we can append the packet */

	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
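/* Editor's note on the resource caps above: at most 10 unresolved
 * entries may exist per table (cache_resolve_queue_len), each entry
 * queues only a handful of skbs (the qlen > 3 check), and entries
 * expire after 10*HZ unless the daemon installs a route first.
 * Anything beyond these limits is dropped with -ENOBUFS rather than
 * letting an unresponsive daemon pin kernel memory.
 */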
/*
 * MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	int line;
	struct mfc_cache *c, *next;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
		    (parent == -1 || parent == c->mfc_parent)) {
			list_del_rcu(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc_cache *uc, *c;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
		    (parent == -1 || parent == c->mfc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);

	/*
	 * Check to see if we resolved a queued (unresolved) entry.
	 * If so, we need to send on the queued frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}

/*
 * Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt)
{
	int i;
	LIST_HEAD(list);
	struct mfc_cache *c, *next;

	/* Shut down all active vif entries */

	for (i = 0; i < mrt->maxvif; i++) {
		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
			vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */

	for (i = 0; i < MFC_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
			if (c->mfc_flags & MFC_STATIC)
				continue;
			list_del_rcu(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	rtnl_lock();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt);
		}
	}
	rtnl_unlock();
}

/*
 * Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct vifctl vif;
	struct mfcctl mfc;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int))
			return -EINVAL;

		rtnl_lock();
		if (rtnl_dereference(mrt->mroute_sk)) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk))
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		rtnl_unlock();
		return ret;

		/*
		 * Manipulate the forwarding caches. These live
		 * in a sort of kernel/user symbiosis.
		 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mfcc_parent;
		rtnl_lock();
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
		rtnl_unlock();
		return ret;
		/*
		 * Control PIM assert.
		 */
	case MRT_ASSERT:
	{
		int v;
		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}
#ifdef CONFIG_IP_PIMSM
	case MRT_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;

		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	case MRT_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;

		/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 1000000000)
			return -EINVAL;

		rtnl_lock();
		ret = 0;
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			if (!ipmr_new_table(net, v))
				ret = -ENOMEM;
			else
				raw_sk(sk)->ipmr_table = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 * Spurious command, or MRT_VERSION which you cannot
	 * set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
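/* Editor's illustration (hypothetical userspace, not kernel code): the
 * control sequence a routing daemon performs against the setsockopt
 * interface above.  Error handling omitted; the address is a
 * documentation-range placeholder.
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	struct vifctl vc = { .vifc_vifi = 0, .vifc_threshold = 1 };
 *
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *	...
 *	setsockopt(fd, IPPROTO_IP, MRT_DONE, NULL, 0);
 */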
/*
 * Getsockopt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	    optname != MRT_PIM &&
#endif
	    optname != MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (optname == MRT_VERSION)
		val = 0x0305;
#ifdef CONFIG_IP_PIMSM
	else if (optname == MRT_PIM)
		val = mrt->mroute_do_pim;
#endif
	else
		val = mrt->mroute_do_assert;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 * The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif


static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}


static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/*
 * Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */

static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}
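/* Resulting layout after ip_encap() (editor's sketch):
 *
 *	+-------------------+--------------------+---------
 *	| outer IPIP header | original IP header | payload
 *	| saddr = vif local | (TTL already       |
 *	| daddr = vif remote|  decremented)      |
 *	+-------------------+--------------------+---------
 *
 * The outer header copies tos/ttl from the inner one and carries no
 * options (ihl = 5), so a dvmrp peer can decapsulate it with a plain
 * ipip receive path.
 */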
static inline int ipmr_forward_finish(struct sock *sk, struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output_sk(sk, skb);
}

/*
 * Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int encap = 0;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}
#endif

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow sending ICMP here, so such packets will
		 * simply disappear into a black hole.
		 */

		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(net, skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but also after forwarding on all output
	 * interfaces. Clearly, if an mrouter runs a multicasting program, it
	 * should receive packets regardless of which interface the program
	 * joined on.
	 * If we did not do this, the program would have to join on all
	 * interfaces. On the other hand, a multihoming host (or a router, but
	 * not an mrouter) cannot join on more than one interface - that would
	 * result in receiving multiple packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, NULL, skb,
		skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ipmr_find_vif(mrt, skb->dev);

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
		struct mfc_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != skb->dev) {
		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until routing daemons are
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for
			 * (S,G) entries whose default multicast route points
			 * to the wrong oif. In any case, it is not a good
			 * idea to use multicasting applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from RPT to SPT,
		     * so we cannot check that the packet arrived on an oif.
		     * That is bad, but otherwise we would need to move a
		     * pretty large chunk of pimd into the kernel. Ouch... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/*
	 * Forward the frame
	 */
	if (cache->mfc_origin == htonl(INADDR_ANY) &&
	    cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mfc_parent &&
		    ip_hdr(skb)->ttl >
				cache->mfc_un.res.ttls[cache->mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = cache->mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((cache->mfc_origin != htonl(INADDR_ANY) ||
		     ct != true_vifi) &&
		    ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					ipmr_queue_xmit(net, mrt, skb2, cache,
							psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
		} else {
			ipmr_queue_xmit(net, mrt, skb, cache, psend);
			return;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
}
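/* Editor's design note on the loop above: psend trails the scan by one
 * interface, so every destination except the last gets an skb_clone()
 * while the final transmit consumes the original skb - one clone fewer
 * than a naive clone-per-oif approach.  When "local" delivery is also
 * required, even the last transmit works on a clone and the original
 * skb is left for ip_local_deliver().
 */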
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
			       LOOPBACK_IFINDEX :
			       skb->dev->ifindex),
		.flowi4_mark = skb->mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}

/*
 * Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */

int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;

	/* Packet is looped back after forwarding; it should not be
	 * forwarded a second time, but it can still be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option in IGMP packets destined to routable
			 * groups. That is very bad, because it means
			 * we can forward NO IGMP messages at all.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	if (!cache) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
						    vif);
	}

	/*
	 * No usable cache entry
	 */
	if (!cache) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (!skb2)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}
2051
2052 #ifdef CONFIG_IP_PIMSM
2053 /* called with rcu_read_lock() */
2054 static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
2055 unsigned int pimlen)
2056 {
2057 struct net_device *reg_dev = NULL;
2058 struct iphdr *encap;
2059
2060 encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
2061 /*
2062 * Check that:
2063 * a. packet is really sent to a multicast group
2064 * b. packet is not a NULL-REGISTER
2065 * c. packet is not truncated
2066 */
2067 if (!ipv4_is_multicast(encap->daddr) ||
2068 encap->tot_len == 0 ||
2069 ntohs(encap->tot_len) + pimlen > skb->len)
2070 return 1;
2071
2072 read_lock(&mrt_lock);
2073 if (mrt->mroute_reg_vif_num >= 0)
2074 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
2075 read_unlock(&mrt_lock);
2076
2077 if (!reg_dev)
2078 return 1;
2079
2080 skb->mac_header = skb->network_header;
2081 skb_pull(skb, (u8 *)encap - skb->data);
2082 skb_reset_network_header(skb);
2083 skb->protocol = htons(ETH_P_IP);
2084 skb->ip_summed = CHECKSUM_NONE;
2085
2086 skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
2087
2088 netif_rx(skb);
2089
2090 return NET_RX_SUCCESS;
2091 }
2092 #endif
2093
2094 #ifdef CONFIG_IP_PIMSM_V1
2095 /*
2096 * Handle IGMP messages of PIMv1
2097 */
2098
2099 int pim_rcv_v1(struct sk_buff *skb)
2100 {
2101 struct igmphdr *pim;
2102 struct net *net = dev_net(skb->dev);
2103 struct mr_table *mrt;
2104
2105 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2106 goto drop;
2107
2108 pim = igmp_hdr(skb);
2109
2110 mrt = ipmr_rt_fib_lookup(net, skb);
2111 if (IS_ERR(mrt))
2112 goto drop;
2113 if (!mrt->mroute_do_pim ||
2114 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
2115 goto drop;
2116
2117 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2118 drop:
2119 kfree_skb(skb);
2120 }
2121 return 0;
2122 }
2123 #endif
2124
2125 #ifdef CONFIG_IP_PIMSM_V2
2126 static int pim_rcv(struct sk_buff *skb)
2127 {
2128 struct pimreghdr *pim;
2129 struct net *net = dev_net(skb->dev);
2130 struct mr_table *mrt;
2131
2132 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2133 goto drop;
2134
2135 pim = (struct pimreghdr *)skb_transport_header(skb);
2136 if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
2137 (pim->flags & PIM_NULL_REGISTER) ||
2138 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
2139 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
2140 goto drop;
2141
2142 mrt = ipmr_rt_fib_lookup(net, skb);
2143 if (IS_ERR(mrt))
2144 goto drop;
2145 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2146 drop:
2147 kfree_skb(skb);
2148 }
2149 return 0;
2150 }
2151 #endif
2152
2153 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2154 struct mfc_cache *c, struct rtmsg *rtm)
2155 {
2156 int ct;
2157 struct rtnexthop *nhp;
2158 struct nlattr *mp_attr;
2159 struct rta_mfc_stats mfcs;
2160
2161 /* If cache is unresolved, don't try to parse IIF and OIF */
2162 if (c->mfc_parent >= MAXVIFS)
2163 return -ENOENT;
2164
2165 if (VIF_EXISTS(mrt, c->mfc_parent) &&
2166 nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
2167 return -EMSGSIZE;
2168
2169 if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
2170 return -EMSGSIZE;
2171
2172 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2173 if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2174 if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
2175 nla_nest_cancel(skb, mp_attr);
2176 return -EMSGSIZE;
2177 }
2178
2179 nhp->rtnh_flags = 0;
2180 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2181 nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
2182 nhp->rtnh_len = sizeof(*nhp);
2183 }
2184 }
2185
2186 nla_nest_end(skb, mp_attr);
2187
2188 mfcs.mfcs_packets = c->mfc_un.res.pkt;
2189 mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2190 mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2191 if (nla_put(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs) < 0)
2192 return -EMSGSIZE;
2193
2194 rtm->rtm_type = RTN_MULTICAST;
2195 return 1;
2196 }
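/* Resulting attribute layout (illustrative):
 *
 *	RTA_IIF        ifindex of the input (parent) VIF
 *	RTA_MULTIPATH  nested array of struct rtnexthop, one per output
 *	               VIF, with rtnh_hops carrying the TTL threshold
 *	RTA_MFC_STATS  struct rta_mfc_stats { packets, bytes, wrong_if }
 *
 * The -ENOENT return for unresolved entries lets callers distinguish
 * "no forwarding state yet" from a real -EMSGSIZE failure.
 */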
2197
2198 int ipmr_get_route(struct net *net, struct sk_buff *skb,
2199 __be32 saddr, __be32 daddr,
2200 struct rtmsg *rtm, int nowait)
2201 {
2202 struct mfc_cache *cache;
2203 struct mr_table *mrt;
2204 int err;
2205
2206 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2207 if (!mrt)
2208 return -ENOENT;
2209
2210 rcu_read_lock();
2211 cache = ipmr_cache_find(mrt, saddr, daddr);
2212 if (!cache && skb->dev) {
2213 int vif = ipmr_find_vif(mrt, skb->dev);
2214
2215 if (vif >= 0)
2216 cache = ipmr_cache_find_any(mrt, daddr, vif);
2217 }
2218 if (!cache) {
2219 struct sk_buff *skb2;
2220 struct iphdr *iph;
2221 struct net_device *dev;
2222 int vif = -1;
2223
2224 if (nowait) {
2225 rcu_read_unlock();
2226 return -EAGAIN;
2227 }
2228
2229 dev = skb->dev;
2230 read_lock(&mrt_lock);
2231 if (dev)
2232 vif = ipmr_find_vif(mrt, dev);
2233 if (vif < 0) {
2234 read_unlock(&mrt_lock);
2235 rcu_read_unlock();
2236 return -ENODEV;
2237 }
2238 skb2 = skb_clone(skb, GFP_ATOMIC);
2239 if (!skb2) {
2240 read_unlock(&mrt_lock);
2241 rcu_read_unlock();
2242 return -ENOMEM;
2243 }
2244
2245 skb_push(skb2, sizeof(struct iphdr));
2246 skb_reset_network_header(skb2);
2247 iph = ip_hdr(skb2);
2248 iph->ihl = sizeof(struct iphdr) >> 2;
2249 iph->saddr = saddr;
2250 iph->daddr = daddr;
2251 iph->version = 0;
2252 err = ipmr_cache_unresolved(mrt, vif, skb2);
2253 read_unlock(&mrt_lock);
2254 rcu_read_unlock();
2255 return err;
2256 }
2257
2258 read_lock(&mrt_lock);
2259 if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
2260 cache->mfc_flags |= MFC_NOTIFY;
2261 err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
2262 read_unlock(&mrt_lock);
2263 rcu_read_unlock();
2264 return err;
2265 }
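/* Caller sketch (illustrative): rtnetlink GETROUTE handlers typically
 * pass nowait != 0 and surface -EAGAIN instead of blocking here, e.g.
 *
 *	err = ipmr_get_route(net, skb, fl4->saddr, fl4->daddr, rtm, 1);
 *	if (err == -EAGAIN)
 *		return err;	/* entry still being resolved */
 *
 * In the nowait == 0 path above, iph->version = 0 deliberately marks
 * the queued dummy header as a netlink request, so the resolve path
 * can answer it with an RTM reply rather than treating it as a data
 * packet to forward.
 */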
2266
2267 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2268 u32 portid, u32 seq, struct mfc_cache *c, int cmd,
2269 int flags)
2270 {
2271 struct nlmsghdr *nlh;
2272 struct rtmsg *rtm;
2273 int err;
2274
2275 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2276 if (!nlh)
2277 return -EMSGSIZE;
2278
2279 rtm = nlmsg_data(nlh);
2280 rtm->rtm_family = RTNL_FAMILY_IPMR;
2281 rtm->rtm_dst_len = 32;
2282 rtm->rtm_src_len = 32;
2283 rtm->rtm_tos = 0;
2284 rtm->rtm_table = mrt->id;
2285 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2286 goto nla_put_failure;
2287 rtm->rtm_type = RTN_MULTICAST;
2288 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2289 if (c->mfc_flags & MFC_STATIC)
2290 rtm->rtm_protocol = RTPROT_STATIC;
2291 else
2292 rtm->rtm_protocol = RTPROT_MROUTED;
2293 rtm->rtm_flags = 0;
2294
2295 if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
2296 nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
2297 goto nla_put_failure;
2298 err = __ipmr_fill_mroute(mrt, skb, c, rtm);
2299 /* do not break the dump if cache is unresolved */
2300 if (err < 0 && err != -ENOENT)
2301 goto nla_put_failure;
2302
2303 nlmsg_end(skb, nlh);
2304 return 0;
2305
2306 nla_put_failure:
2307 nlmsg_cancel(skb, nlh);
2308 return -EMSGSIZE;
2309 }
2310
2311 static size_t mroute_msgsize(bool unresolved, int maxvif)
2312 {
2313 size_t len =
2314 NLMSG_ALIGN(sizeof(struct rtmsg))
2315 + nla_total_size(4) /* RTA_TABLE */
2316 + nla_total_size(4) /* RTA_SRC */
2317 + nla_total_size(4) /* RTA_DST */
2318 ;
2319
2320 if (!unresolved)
2321 len = len
2322 + nla_total_size(4) /* RTA_IIF */
2323 + nla_total_size(0) /* RTA_MULTIPATH */
2324 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2325 /* RTA_MFC_STATS */
2326 + nla_total_size(sizeof(struct rta_mfc_stats))
2327 ;
2328
2329 return len;
2330 }
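/* Worked example (illustrative): a resolved entry in a table with
 * maxvif == 2 is budgeted as
 *
 *	NLMSG_ALIGN(sizeof(struct rtmsg))
 *	+ 3 * nla_total_size(4)				RTA_TABLE/SRC/DST
 *	+ nla_total_size(4)				RTA_IIF
 *	+ nla_total_size(0)				RTA_MULTIPATH header
 *	+ 2 * NLA_ALIGN(sizeof(struct rtnexthop))	nexthop slots
 *	+ nla_total_size(sizeof(struct rta_mfc_stats))
 *
 * an upper bound, since __ipmr_fill_mroute() skips VIFs whose TTL
 * threshold is 255.
 */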
2331
2332 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2333 int cmd)
2334 {
2335 struct net *net = read_pnet(&mrt->net);
2336 struct sk_buff *skb;
2337 int err = -ENOBUFS;
2338
2339 skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
2340 GFP_ATOMIC);
2341 if (!skb)
2342 goto errout;
2343
2344 err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2345 if (err < 0)
2346 goto errout;
2347
2348 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
2349 return;
2350
2351 errout:
2352 kfree_skb(skb);
2353 if (err < 0)
2354 rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
2355 }
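/* Listener sketch (illustrative): a monitor receives these events by
 * joining the mroute notification group on a netlink socket:
 *
 *	struct sockaddr_nl snl = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = RTMGRP_IPV4_MROUTE,
 *	};
 *	bind(nl_fd, (struct sockaddr *)&snl, sizeof(snl));
 *
 * On allocation or fill failure the error is parked on the group via
 * rtnl_set_sk_err(), so listeners see ENOBUFS instead of silently
 * missing an event.
 */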
2356
2357 static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2358 {
2359 struct net *net = sock_net(skb->sk);
2360 struct mr_table *mrt;
2361 struct mfc_cache *mfc;
2362 unsigned int t = 0, s_t;
2363 unsigned int h = 0, s_h;
2364 unsigned int e = 0, s_e;
2365
2366 s_t = cb->args[0];
2367 s_h = cb->args[1];
2368 s_e = cb->args[2];
2369
2370 rcu_read_lock();
2371 ipmr_for_each_table(mrt, net) {
2372 if (t < s_t)
2373 goto next_table;
2374 if (t > s_t)
2375 s_h = 0;
2376 for (h = s_h; h < MFC_LINES; h++) {
2377 list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
2378 if (e < s_e)
2379 goto next_entry;
2380 if (ipmr_fill_mroute(mrt, skb,
2381 NETLINK_CB(cb->skb).portid,
2382 cb->nlh->nlmsg_seq,
2383 mfc, RTM_NEWROUTE,
2384 NLM_F_MULTI) < 0)
2385 goto done;
2386 next_entry:
2387 e++;
2388 }
2389 e = s_e = 0;
2390 }
2391 spin_lock_bh(&mfc_unres_lock);
2392 list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
2393 if (e < s_e)
2394 goto next_entry2;
2395 if (ipmr_fill_mroute(mrt, skb,
2396 NETLINK_CB(cb->skb).portid,
2397 cb->nlh->nlmsg_seq,
2398 mfc, RTM_NEWROUTE,
2399 NLM_F_MULTI) < 0) {
2400 spin_unlock_bh(&mfc_unres_lock);
2401 goto done;
2402 }
2403 next_entry2:
2404 e++;
2405 }
2406 spin_unlock_bh(&mfc_unres_lock);
2407 e = s_e = 0;
2408 s_h = 0;
2409 next_table:
2410 t++;
2411 }
2412 done:
2413 rcu_read_unlock();
2414
2415 cb->args[2] = e;
2416 cb->args[1] = h;
2417 cb->args[0] = t;
2418
2419 return skb->len;
2420 }
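/* Dump resume note: cb->args[0..2] persist between dump invocations,
 * so a partially filled skb resumes at table t, hash chain h, entry e
 * on the next call.  Entries that come or go between calls can be
 * skipped or seen twice, the usual rtnetlink dump semantics.
 */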
2421
2422 #ifdef CONFIG_PROC_FS
2423 /*
2424  * The /proc interfaces to multicast routing:
2425 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
2426 */
2427 struct ipmr_vif_iter {
2428 struct seq_net_private p;
2429 struct mr_table *mrt;
2430 int ct;
2431 };
2432
2433 static struct vif_device *ipmr_vif_seq_idx(struct net *net,
2434 struct ipmr_vif_iter *iter,
2435 loff_t pos)
2436 {
2437 struct mr_table *mrt = iter->mrt;
2438
2439 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
2440 if (!VIF_EXISTS(mrt, iter->ct))
2441 continue;
2442 if (pos-- == 0)
2443 return &mrt->vif_table[iter->ct];
2444 }
2445 return NULL;
2446 }
2447
2448 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
2449 __acquires(mrt_lock)
2450 {
2451 struct ipmr_vif_iter *iter = seq->private;
2452 struct net *net = seq_file_net(seq);
2453 struct mr_table *mrt;
2454
2455 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2456 if (!mrt)
2457 return ERR_PTR(-ENOENT);
2458
2459 iter->mrt = mrt;
2460
2461 read_lock(&mrt_lock);
2462 return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
2463 : SEQ_START_TOKEN;
2464 }
2465
2466 static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2467 {
2468 struct ipmr_vif_iter *iter = seq->private;
2469 struct net *net = seq_file_net(seq);
2470 struct mr_table *mrt = iter->mrt;
2471
2472 ++*pos;
2473 if (v == SEQ_START_TOKEN)
2474 return ipmr_vif_seq_idx(net, iter, 0);
2475
2476 while (++iter->ct < mrt->maxvif) {
2477 if (!VIF_EXISTS(mrt, iter->ct))
2478 continue;
2479 return &mrt->vif_table[iter->ct];
2480 }
2481 return NULL;
2482 }
2483
2484 static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
2485 __releases(mrt_lock)
2486 {
2487 read_unlock(&mrt_lock);
2488 }
2489
2490 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
2491 {
2492 struct ipmr_vif_iter *iter = seq->private;
2493 struct mr_table *mrt = iter->mrt;
2494
2495 if (v == SEQ_START_TOKEN) {
2496 seq_puts(seq,
2497 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
2498 } else {
2499 const struct vif_device *vif = v;
2500 const char *name = vif->dev ? vif->dev->name : "none";
2501
2502 seq_printf(seq,
2503 				   "%2zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
2504 vif - mrt->vif_table,
2505 name, vif->bytes_in, vif->pkt_in,
2506 vif->bytes_out, vif->pkt_out,
2507 vif->flags, vif->local, vif->remote);
2508 }
2509 return 0;
2510 }
2511
2512 static const struct seq_operations ipmr_vif_seq_ops = {
2513 .start = ipmr_vif_seq_start,
2514 .next = ipmr_vif_seq_next,
2515 .stop = ipmr_vif_seq_stop,
2516 .show = ipmr_vif_seq_show,
2517 };
2518
2519 static int ipmr_vif_open(struct inode *inode, struct file *file)
2520 {
2521 return seq_open_net(inode, file, &ipmr_vif_seq_ops,
2522 sizeof(struct ipmr_vif_iter));
2523 }
2524
2525 static const struct file_operations ipmr_vif_fops = {
2526 .owner = THIS_MODULE,
2527 .open = ipmr_vif_open,
2528 .read = seq_read,
2529 .llseek = seq_lseek,
2530 .release = seq_release_net,
2531 };
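/* Example /proc/net/ip_mr_vif output (illustrative values):
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0         123456     789         0       0 00000 0101A8C0 00000000
 *
 * Local/Remote are raw hex addresses and Flags are the VIFF_* bits
 * from linux/mroute.h; "none" is printed for a vanished device.
 */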
2532
2533 struct ipmr_mfc_iter {
2534 struct seq_net_private p;
2535 struct mr_table *mrt;
2536 struct list_head *cache;
2537 int ct;
2538 };
2539
2540
2541 static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
2542 struct ipmr_mfc_iter *it, loff_t pos)
2543 {
2544 struct mr_table *mrt = it->mrt;
2545 struct mfc_cache *mfc;
2546
2547 rcu_read_lock();
2548 for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
2549 it->cache = &mrt->mfc_cache_array[it->ct];
2550 list_for_each_entry_rcu(mfc, it->cache, list)
2551 if (pos-- == 0)
2552 return mfc;
2553 }
2554 rcu_read_unlock();
2555
2556 spin_lock_bh(&mfc_unres_lock);
2557 it->cache = &mrt->mfc_unres_queue;
2558 list_for_each_entry(mfc, it->cache, list)
2559 if (pos-- == 0)
2560 return mfc;
2561 spin_unlock_bh(&mfc_unres_lock);
2562
2563 it->cache = NULL;
2564 return NULL;
2565 }
2566
2567
2568 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
2569 {
2570 struct ipmr_mfc_iter *it = seq->private;
2571 struct net *net = seq_file_net(seq);
2572 struct mr_table *mrt;
2573
2574 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2575 if (!mrt)
2576 return ERR_PTR(-ENOENT);
2577
2578 it->mrt = mrt;
2579 it->cache = NULL;
2580 it->ct = 0;
2581 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
2582 : SEQ_START_TOKEN;
2583 }
2584
2585 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2586 {
2587 struct mfc_cache *mfc = v;
2588 struct ipmr_mfc_iter *it = seq->private;
2589 struct net *net = seq_file_net(seq);
2590 struct mr_table *mrt = it->mrt;
2591
2592 ++*pos;
2593
2594 if (v == SEQ_START_TOKEN)
2595 return ipmr_mfc_seq_idx(net, seq->private, 0);
2596
2597 if (mfc->list.next != it->cache)
2598 return list_entry(mfc->list.next, struct mfc_cache, list);
2599
2600 if (it->cache == &mrt->mfc_unres_queue)
2601 goto end_of_list;
2602
2603 BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
2604
2605 while (++it->ct < MFC_LINES) {
2606 it->cache = &mrt->mfc_cache_array[it->ct];
2607 if (list_empty(it->cache))
2608 continue;
2609 return list_first_entry(it->cache, struct mfc_cache, list);
2610 }
2611
2612 /* exhausted cache_array, show unresolved */
2613 rcu_read_unlock();
2614 it->cache = &mrt->mfc_unres_queue;
2615 it->ct = 0;
2616
2617 spin_lock_bh(&mfc_unres_lock);
2618 if (!list_empty(it->cache))
2619 return list_first_entry(it->cache, struct mfc_cache, list);
2620
2621 end_of_list:
2622 spin_unlock_bh(&mfc_unres_lock);
2623 it->cache = NULL;
2624
2625 return NULL;
2626 }
2627
2628 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
2629 {
2630 struct ipmr_mfc_iter *it = seq->private;
2631 struct mr_table *mrt = it->mrt;
2632
2633 if (it->cache == &mrt->mfc_unres_queue)
2634 spin_unlock_bh(&mfc_unres_lock);
2635 else if (it->cache == &mrt->mfc_cache_array[it->ct])
2636 rcu_read_unlock();
2637 }
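/* Locking handoff: ipmr_mfc_seq_idx()/_next() hold rcu_read_lock()
 * while walking mfc_cache_array[] and switch to mfc_unres_lock for
 * the unresolved queue; this stop routine releases whichever one is
 * still held, keyed off it->cache.
 */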
2638
2639 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
2640 {
2641 int n;
2642
2643 if (v == SEQ_START_TOKEN) {
2644 seq_puts(seq,
2645 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
2646 } else {
2647 const struct mfc_cache *mfc = v;
2648 const struct ipmr_mfc_iter *it = seq->private;
2649 const struct mr_table *mrt = it->mrt;
2650
2651 seq_printf(seq, "%08X %08X %-3hd",
2652 (__force u32) mfc->mfc_mcastgrp,
2653 (__force u32) mfc->mfc_origin,
2654 mfc->mfc_parent);
2655
2656 if (it->cache != &mrt->mfc_unres_queue) {
2657 seq_printf(seq, " %8lu %8lu %8lu",
2658 mfc->mfc_un.res.pkt,
2659 mfc->mfc_un.res.bytes,
2660 mfc->mfc_un.res.wrong_if);
2661 for (n = mfc->mfc_un.res.minvif;
2662 n < mfc->mfc_un.res.maxvif; n++) {
2663 if (VIF_EXISTS(mrt, n) &&
2664 mfc->mfc_un.res.ttls[n] < 255)
2665 seq_printf(seq,
2666 " %2d:%-3d",
2667 n, mfc->mfc_un.res.ttls[n]);
2668 }
2669 } else {
2670 /* unresolved mfc_caches don't contain
2671 * pkt, bytes and wrong_if values
2672 */
2673 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
2674 }
2675 seq_putc(seq, '\n');
2676 }
2677 return 0;
2678 }
2679
2680 static const struct seq_operations ipmr_mfc_seq_ops = {
2681 .start = ipmr_mfc_seq_start,
2682 .next = ipmr_mfc_seq_next,
2683 .stop = ipmr_mfc_seq_stop,
2684 .show = ipmr_mfc_seq_show,
2685 };
2686
2687 static int ipmr_mfc_open(struct inode *inode, struct file *file)
2688 {
2689 return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
2690 sizeof(struct ipmr_mfc_iter));
2691 }
2692
2693 static const struct file_operations ipmr_mfc_fops = {
2694 .owner = THIS_MODULE,
2695 .open = ipmr_mfc_open,
2696 .read = seq_read,
2697 .llseek = seq_lseek,
2698 .release = seq_release_net,
2699 };
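/* Example /proc/net/ip_mr_cache output (illustrative values):
 *
 *	Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 *	010101EF 0101A8C0  0        42    63000        0  1:1  2:1
 *
 * Group/Origin are raw hex; unresolved entries print zeroed counters
 * (see the else branch in ipmr_mfc_seq_show() above) and a parent VIF
 * outside the valid range, typically shown as -1.
 */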
2700 #endif
2701
2702 #ifdef CONFIG_IP_PIMSM_V2
2703 static const struct net_protocol pim_protocol = {
2704 .handler = pim_rcv,
2705 .netns_ok = 1,
2706 };
2707 #endif
2708
2709
2710 /*
2711 * Setup for IP multicast routing
2712 */
2713 static int __net_init ipmr_net_init(struct net *net)
2714 {
2715 int err;
2716
2717 err = ipmr_rules_init(net);
2718 if (err < 0)
2719 goto fail;
2720
2721 #ifdef CONFIG_PROC_FS
2722 err = -ENOMEM;
2723 if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
2724 goto proc_vif_fail;
2725 if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
2726 goto proc_cache_fail;
2727 #endif
2728 return 0;
2729
2730 #ifdef CONFIG_PROC_FS
2731 proc_cache_fail:
2732 remove_proc_entry("ip_mr_vif", net->proc_net);
2733 proc_vif_fail:
2734 ipmr_rules_exit(net);
2735 #endif
2736 fail:
2737 return err;
2738 }
2739
2740 static void __net_exit ipmr_net_exit(struct net *net)
2741 {
2742 #ifdef CONFIG_PROC_FS
2743 remove_proc_entry("ip_mr_cache", net->proc_net);
2744 remove_proc_entry("ip_mr_vif", net->proc_net);
2745 #endif
2746 ipmr_rules_exit(net);
2747 }
2748
2749 static struct pernet_operations ipmr_net_ops = {
2750 .init = ipmr_net_init,
2751 .exit = ipmr_net_exit,
2752 };
2753
2754 int __init ip_mr_init(void)
2755 {
2756 int err;
2757
2758 mrt_cachep = kmem_cache_create("ip_mrt_cache",
2759 sizeof(struct mfc_cache),
2760 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
2761 NULL);
2762 if (!mrt_cachep)
2763 return -ENOMEM;
2764
2765 err = register_pernet_subsys(&ipmr_net_ops);
2766 if (err)
2767 goto reg_pernet_fail;
2768
2769 err = register_netdevice_notifier(&ip_mr_notifier);
2770 if (err)
2771 goto reg_notif_fail;
2772 #ifdef CONFIG_IP_PIMSM_V2
2773 if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
2774 pr_err("%s: can't add PIM protocol\n", __func__);
2775 err = -EAGAIN;
2776 goto add_proto_fail;
2777 }
2778 #endif
2779 rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
2780 NULL, ipmr_rtm_dumproute, NULL);
2781 return 0;
2782
2783 #ifdef CONFIG_IP_PIMSM_V2
2784 add_proto_fail:
2785 unregister_netdevice_notifier(&ip_mr_notifier);
2786 #endif
2787 reg_notif_fail:
2788 unregister_pernet_subsys(&ipmr_net_ops);
2789 reg_pernet_fail:
2790 kmem_cache_destroy(mrt_cachep);
2791 return err;
2792 }
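/* Initialisation note: ip_mr_init() is invoked from inet_init() when
 * CONFIG_IP_MROUTE is enabled.  The error labels above unwind in
 * strictly reverse registration order, e.g. a failed PIM protocol add
 * unregisters the notifier, then the pernet subsys, then frees the
 * cache:
 *
 *	kmem_cache_create -> register_pernet_subsys ->
 *	register_netdevice_notifier -> inet_add_protocol -> rtnl_register
 */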