/*
 *  Linux NET3: Internet Group Management Protocol  [IGMP]
 *
 *  This code implements the IGMP protocol as defined in RFC1112. There has
 *  been a further revision of this protocol since which is now supported.
 *
 *  If you have trouble with this module be careful what gcc you have used,
 *  the older version didn't come out right using gcc 2.5.8, the newer one
 *  seems to fall out with gcc 2.6.2.
 *
 *  Authors:
 *      Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *
 *      Alan Cox        :   Added lots of __inline__ to optimise
 *                          the memory usage of all the tiny little
 *                          functions.
 *      Alan Cox        :   Dumped the header building experiment.
 *      Alan Cox        :   Minor tweaks ready for multicast routing
 *                          and extended IGMP protocol.
 *      Alan Cox        :   Removed a load of inline directives. Gcc 2.5.8
 *                          writes utterly bogus code otherwise (sigh)
 *                          fixed IGMP loopback to behave in the manner
 *                          desired by mrouted, fixed the fact it has been
 *                          broken since 1.3.6 and cleaned up a few minor
 *                          points.
 *
 *      Chih-Jen Chang  :   Tried to revise IGMP to Version 2
 *      Tsu-Sheng Tsao      E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu
 *                          The enhancements are mainly based on Steve Deering's
 *                          ipmulti-3.5 source code.
 *      Chih-Jen Chang  :   Added the igmp_get_mrouter_info and
 *      Tsu-Sheng Tsao      igmp_set_mrouter_info to keep track of
 *                          the mrouted version on that device.
 *      Chih-Jen Chang  :   Added the max_resp_time parameter to
 *      Tsu-Sheng Tsao      igmp_heard_query(). Using this parameter
 *                          to identify the multicast router version
 *                          and do what the IGMP version 2 specified.
 *      Chih-Jen Chang  :   Added a timer to revert to IGMP V2 router
 *      Tsu-Sheng Tsao      if the specified time expired.
 *      Alan Cox        :   Stop IGMP from 0.0.0.0 being accepted.
 *      Alan Cox        :   Use GFP_ATOMIC in the right places.
 *      Christian Daudt :   igmp timer wasn't set for local group
 *                          memberships but was being deleted,
 *                          which caused a "del_timer() called
 *                          from %p with timer not initialized\n"
 *                          message (960131).
 *      Christian Daudt :   removed del_timer from
 *                          igmp_timer_expire function (960205).
 *      Christian Daudt :   igmp_heard_report now only calls
 *                          igmp_timer_expire if tm->running is
 *                          true (960216).
 *      Malcolm Beattie :   ttl comparison wrong in igmp_rcv made
 *                          igmp_heard_query never trigger. Expiry
 *                          miscalculation fixed in igmp_heard_query
 *                          and random() made to return unsigned to
 *                          prevent negative expiry times.
 *      Alexey Kuznetsov:   Wrong group leaving behaviour, backport
 *                          fix from pending 2.1.x patches.
 *      Alan Cox        :   Forget to enable FDDI support earlier.
 *      Alexey Kuznetsov:   Fixed leaving groups on device down.
 *      Alexey Kuznetsov:   Accordance to igmp-v2-06 draft.
 *      David L Stevens :   IGMPv3 support, with help from
 *                          Vinay Kulkarni
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/times.h>
#include <linux/pkt_sched.h>

#include <net/net_namespace.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/inet_common.h>
#include <linux/netfilter_ipv4.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif

#define IP_MAX_MEMBERSHIPS      20
#define IP_MAX_MSF              10

#ifdef CONFIG_IP_MULTICAST
/* Parameter names and values are taken from igmp-v2-06 draft */

#define IGMP_V1_ROUTER_PRESENT_TIMEOUT          (400*HZ)
#define IGMP_V2_ROUTER_PRESENT_TIMEOUT          (400*HZ)
#define IGMP_V2_UNSOLICITED_REPORT_INTERVAL     (10*HZ)
#define IGMP_V3_UNSOLICITED_REPORT_INTERVAL     (1*HZ)
#define IGMP_QUERY_RESPONSE_INTERVAL            (10*HZ)
#define IGMP_QUERY_ROBUSTNESS_VARIABLE          2


#define IGMP_INITIAL_REPORT_DELAY               (1)

/* IGMP_INITIAL_REPORT_DELAY is not from IGMP specs!
 * IGMP specs require reporting membership immediately after
 * joining a group, but we delay the first report by a
 * small interval. It seems more natural and still does not
 * contradict the specs provided this delay is small enough.
 */

#define IGMP_V1_SEEN(in_dev) \
        (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
         IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
         ((in_dev)->mr_v1_seen && \
          time_before(jiffies, (in_dev)->mr_v1_seen)))
#define IGMP_V2_SEEN(in_dev) \
        (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
         IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
         ((in_dev)->mr_v2_seen && \
          time_before(jiffies, (in_dev)->mr_v2_seen)))

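/* Delay (in jiffies) between unsolicited membership reports on this
 * interface: the per-device IGMPv2 interval when a v1/v2 querier has been
 * seen, otherwise the IGMPv3 interval.  Both are configured in
 * milliseconds and clamped to at least one jiffy.
 */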
static int unsolicited_report_interval(struct in_device *in_dev)
{
        int interval_ms, interval_jiffies;

        if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
                interval_ms = IN_DEV_CONF_GET(
                        in_dev,
                        IGMPV2_UNSOLICITED_REPORT_INTERVAL);
        else /* v3 */
                interval_ms = IN_DEV_CONF_GET(
                        in_dev,
                        IGMPV3_UNSOLICITED_REPORT_INTERVAL);

        interval_jiffies = msecs_to_jiffies(interval_ms);

        /* _timer functions can't handle a delay of 0 jiffies so ensure
         * we always return a positive value.
         */
        if (interval_jiffies <= 0)
                interval_jiffies = 1;
        return interval_jiffies;
}

static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
static void igmpv3_clear_delrec(struct in_device *in_dev);
static int sf_setstate(struct ip_mc_list *pmc);
static void sf_markstate(struct ip_mc_list *pmc);
#endif
static void ip_mc_clear_src(struct ip_mc_list *pmc);
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
                         int sfcount, __be32 *psfsrc, int delta);

static void ip_ma_put(struct ip_mc_list *im)
{
        if (atomic_dec_and_test(&im->refcnt)) {
                in_dev_put(im->interface);
                kfree_rcu(im, rcu);
        }
}

#define for_each_pmc_rcu(in_dev, pmc)                           \
        for (pmc = rcu_dereference(in_dev->mc_list);            \
             pmc != NULL;                                       \
             pmc = rcu_dereference(pmc->next_rcu))

#define for_each_pmc_rtnl(in_dev, pmc)                          \
        for (pmc = rtnl_dereference(in_dev->mc_list);           \
             pmc != NULL;                                       \
             pmc = rtnl_dereference(pmc->next_rcu))

#ifdef CONFIG_IP_MULTICAST

/*
 *  Timer management
 */

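/* A pending per-group timer holds a reference on its ip_mc_list: a
 * reference is taken when the timer is armed from the inactive state and
 * released when it is cancelled, or via ip_ma_put() once it has fired
 * (see igmp_timer_expire()).
 */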
static void igmp_stop_timer(struct ip_mc_list *im)
{
        spin_lock_bh(&im->lock);
        if (del_timer(&im->timer))
                atomic_dec(&im->refcnt);
        im->tm_running = 0;
        im->reporter = 0;
        im->unsolicit_count = 0;
        spin_unlock_bh(&im->lock);
}

/* Must be called with im->lock held */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
        int tv = prandom_u32() % max_delay;

        im->tm_running = 1;
        if (!mod_timer(&im->timer, jiffies+tv+2))
                atomic_inc(&im->refcnt);
}

static void igmp_gq_start_timer(struct in_device *in_dev)
{
        int tv = prandom_u32() % in_dev->mr_maxdelay;

        in_dev->mr_gq_running = 1;
        if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
                in_dev_hold(in_dev);
}

static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
{
        int tv = prandom_u32() % delay;

        if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
                in_dev_hold(in_dev);
}

static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
{
        spin_lock_bh(&im->lock);
        im->unsolicit_count = 0;
        if (del_timer(&im->timer)) {
                if ((long)(im->timer.expires-jiffies) < max_delay) {
                        add_timer(&im->timer);
                        im->tm_running = 1;
                        spin_unlock_bh(&im->lock);
                        return;
                }
                atomic_dec(&im->refcnt);
        }
        igmp_start_timer(im, max_delay);
        spin_unlock_bh(&im->lock);
}

257/*
258 * Send an IGMP report.
259 */
260
261#define IGMP_SIZE (sizeof(struct igmphdr)+sizeof(struct iphdr)+4)
262
263
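/* Decide whether source psf belongs in a group record of the given IGMPv3
 * type for pmc.  gdeleted/sdeleted indicate that the group or the source
 * comes from the "tomb" lists of deleted state rather than the live lists.
 */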
264static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
265 int gdeleted, int sdeleted)
266{
267 switch (type) {
268 case IGMPV3_MODE_IS_INCLUDE:
269 case IGMPV3_MODE_IS_EXCLUDE:
270 if (gdeleted || sdeleted)
271 return 0;
ad12583f
DS
272 if (!(pmc->gsquery && !psf->sf_gsresp)) {
273 if (pmc->sfmode == MCAST_INCLUDE)
274 return 1;
275 /* don't include if this source is excluded
276 * in all filters
277 */
278 if (psf->sf_count[MCAST_INCLUDE])
279 return type == IGMPV3_MODE_IS_INCLUDE;
280 return pmc->sfcount[MCAST_EXCLUDE] ==
281 psf->sf_count[MCAST_EXCLUDE];
282 }
283 return 0;
1da177e4
LT
284 case IGMPV3_CHANGE_TO_INCLUDE:
285 if (gdeleted || sdeleted)
286 return 0;
287 return psf->sf_count[MCAST_INCLUDE] != 0;
288 case IGMPV3_CHANGE_TO_EXCLUDE:
289 if (gdeleted || sdeleted)
290 return 0;
291 if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
292 psf->sf_count[MCAST_INCLUDE])
293 return 0;
294 return pmc->sfcount[MCAST_EXCLUDE] ==
295 psf->sf_count[MCAST_EXCLUDE];
296 case IGMPV3_ALLOW_NEW_SOURCES:
297 if (gdeleted || !psf->sf_crcount)
298 return 0;
299 return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
300 case IGMPV3_BLOCK_OLD_SOURCES:
301 if (pmc->sfmode == MCAST_INCLUDE)
302 return gdeleted || (psf->sf_crcount && sdeleted);
303 return psf->sf_crcount && !gdeleted && !sdeleted;
304 }
305 return 0;
306}
307
308static int
309igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
310{
311 struct ip_sf_list *psf;
312 int scount = 0;
313
c71151f0 314 for (psf = pmc->sources; psf; psf = psf->sf_next) {
1da177e4
LT
315 if (!is_in(pmc, psf, type, gdeleted, sdeleted))
316 continue;
317 scount++;
318 }
319 return scount;
320}
321
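/* Allocate and prime an sk_buff for an IGMPv3 report: route it to the
 * all-IGMPv3-routers address, build an IP header carrying the Router Alert
 * option, and append an empty igmpv3_report header.  If a full-size
 * allocation fails, the requested size is halved down to a 256-byte floor.
 */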
4c672e4b 322static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
323{
324 struct sk_buff *skb;
325 struct rtable *rt;
326 struct iphdr *pip;
327 struct igmpv3_report *pig;
877acedc 328 struct net *net = dev_net(dev);
31e4543d 329 struct flowi4 fl4;
66088243
HX
330 int hlen = LL_RESERVED_SPACE(dev);
331 int tlen = dev->needed_tailroom;
4c672e4b 332 unsigned int size = mtu;
1da177e4 333
57e1ab6e 334 while (1) {
66088243 335 skb = alloc_skb(size + hlen + tlen,
57e1ab6e
ED
336 GFP_ATOMIC | __GFP_NOWARN);
337 if (skb)
338 break;
339 size >>= 1;
340 if (size < 256)
341 return NULL;
342 }
9d4a0314 343 skb->priority = TC_PRIO_CONTROL;
1da177e4 344
31e4543d 345 rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
78fbfd8a
DM
346 0, 0,
347 IPPROTO_IGMP, 0, dev->ifindex);
348 if (IS_ERR(rt)) {
349 kfree_skb(skb);
350 return NULL;
1da177e4 351 }
1da177e4 352
d8d1f30b 353 skb_dst_set(skb, &rt->dst);
1da177e4
LT
354 skb->dev = dev;
355
4c672e4b
DB
356 skb->reserved_tailroom = skb_end_offset(skb) -
357 min(mtu, skb_end_offset(skb));
66088243 358 skb_reserve(skb, hlen);
1da177e4 359
7e28ecc2 360 skb_reset_network_header(skb);
eddc9ec5 361 pip = ip_hdr(skb);
7e28ecc2 362 skb_put(skb, sizeof(struct iphdr) + 4);
1da177e4
LT
363
364 pip->version = 4;
365 pip->ihl = (sizeof(struct iphdr)+4)>>2;
366 pip->tos = 0xc0;
367 pip->frag_off = htons(IP_DF);
368 pip->ttl = 1;
492f64ce
DM
369 pip->daddr = fl4.daddr;
370 pip->saddr = fl4.saddr;
1da177e4
LT
371 pip->protocol = IPPROTO_IGMP;
372 pip->tot_len = 0; /* filled in later */
b6a7719a 373 ip_select_ident(net, skb, NULL);
5e73ea1a
DB
374 ((u8 *)&pip[1])[0] = IPOPT_RA;
375 ((u8 *)&pip[1])[1] = 4;
376 ((u8 *)&pip[1])[2] = 0;
377 ((u8 *)&pip[1])[3] = 0;
1da177e4 378
b0e380b1 379 skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
d10ba34b 380 skb_put(skb, sizeof(*pig));
d9edf9e2 381 pig = igmpv3_report_hdr(skb);
1da177e4
LT
382 pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT;
383 pig->resv1 = 0;
384 pig->csum = 0;
385 pig->resv2 = 0;
386 pig->ngrec = 0;
387 return skb;
388}
389
390static int igmpv3_sendpack(struct sk_buff *skb)
391{
d9edf9e2 392 struct igmphdr *pig = igmp_hdr(skb);
f7c0c2ae 393 const int igmplen = skb_tail_pointer(skb) - skb_transport_header(skb);
1da177e4 394
d9edf9e2 395 pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);
1da177e4 396
c439cb2e 397 return ip_local_out(skb);
1da177e4
LT
398}
399
400static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
401{
a7e9ff73 402 return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel);
1da177e4
LT
403}
404
405static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
406 int type, struct igmpv3_grec **ppgr)
407{
408 struct net_device *dev = pmc->interface->dev;
409 struct igmpv3_report *pih;
410 struct igmpv3_grec *pgr;
411
412 if (!skb)
413 skb = igmpv3_newpack(dev, dev->mtu);
414 if (!skb)
415 return NULL;
416 pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
417 pgr->grec_type = type;
418 pgr->grec_auxwords = 0;
419 pgr->grec_nsrcs = 0;
420 pgr->grec_mca = pmc->multiaddr;
d9edf9e2 421 pih = igmpv3_report_hdr(skb);
1da177e4
LT
422 pih->ngrec = htons(ntohs(pih->ngrec)+1);
423 *ppgr = pgr;
424 return skb;
425}
426
4c672e4b 427#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
428
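/* Append a group record of the given type for pmc to skb, starting a fresh
 * report packet whenever the current one runs out of room.  For
 * ALLOW_NEW_SOURCES/BLOCK_OLD_SOURCES records the per-source retransmit
 * counter (sf_crcount) is decremented, and exhausted entries on the
 * deleted lists are freed.
 */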
429static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
430 int type, int gdeleted, int sdeleted)
431{
432 struct net_device *dev = pmc->interface->dev;
433 struct igmpv3_report *pih;
434 struct igmpv3_grec *pgr = NULL;
435 struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
ad12583f 436 int scount, stotal, first, isquery, truncate;
1da177e4
LT
437
438 if (pmc->multiaddr == IGMP_ALL_HOSTS)
439 return skb;
440
441 isquery = type == IGMPV3_MODE_IS_INCLUDE ||
442 type == IGMPV3_MODE_IS_EXCLUDE;
443 truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
444 type == IGMPV3_CHANGE_TO_EXCLUDE;
445
ad12583f
DS
446 stotal = scount = 0;
447
1da177e4
LT
448 psf_list = sdeleted ? &pmc->tomb : &pmc->sources;
449
ad12583f
DS
450 if (!*psf_list)
451 goto empty_source;
452
d9edf9e2 453 pih = skb ? igmpv3_report_hdr(skb) : NULL;
1da177e4
LT
454
455 /* EX and TO_EX get a fresh packet, if needed */
456 if (truncate) {
457 if (pih && pih->ngrec &&
458 AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
459 if (skb)
460 igmpv3_sendpack(skb);
461 skb = igmpv3_newpack(dev, dev->mtu);
462 }
463 }
464 first = 1;
1da177e4 465 psf_prev = NULL;
c71151f0 466 for (psf = *psf_list; psf; psf = psf_next) {
ea4d9e72 467 __be32 *psrc;
1da177e4
LT
468
469 psf_next = psf->sf_next;
470
471 if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
472 psf_prev = psf;
473 continue;
474 }
475
476 /* clear marks on query responses */
477 if (isquery)
478 psf->sf_gsresp = 0;
479
63007727 480 if (AVAILABLE(skb) < sizeof(__be32) +
1da177e4
LT
481 first*sizeof(struct igmpv3_grec)) {
482 if (truncate && !first)
483 break; /* truncate these */
484 if (pgr)
485 pgr->grec_nsrcs = htons(scount);
486 if (skb)
487 igmpv3_sendpack(skb);
488 skb = igmpv3_newpack(dev, dev->mtu);
489 first = 1;
490 scount = 0;
491 }
492 if (first) {
493 skb = add_grhead(skb, pmc, type, &pgr);
494 first = 0;
495 }
cc63f70b
AD
496 if (!skb)
497 return NULL;
63007727 498 psrc = (__be32 *)skb_put(skb, sizeof(__be32));
1da177e4 499 *psrc = psf->sf_inaddr;
ad12583f 500 scount++; stotal++;
1da177e4
LT
501 if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
502 type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
503 psf->sf_crcount--;
504 if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
505 if (psf_prev)
506 psf_prev->sf_next = psf->sf_next;
507 else
508 *psf_list = psf->sf_next;
509 kfree(psf);
510 continue;
511 }
512 }
513 psf_prev = psf;
514 }
ad12583f
DS
515
516empty_source:
517 if (!stotal) {
518 if (type == IGMPV3_ALLOW_NEW_SOURCES ||
519 type == IGMPV3_BLOCK_OLD_SOURCES)
520 return skb;
521 if (pmc->crcount || isquery) {
522 /* make sure we have room for group header */
c71151f0 523 if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)) {
ad12583f
DS
524 igmpv3_sendpack(skb);
525 skb = NULL; /* add_grhead will get a new one */
526 }
527 skb = add_grhead(skb, pmc, type, &pgr);
528 }
529 }
1da177e4
LT
530 if (pgr)
531 pgr->grec_nsrcs = htons(scount);
532
533 if (isquery)
534 pmc->gsquery = 0; /* clear query state on report */
535 return skb;
536}
537
538static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
539{
540 struct sk_buff *skb = NULL;
541 int type;
542
543 if (!pmc) {
1d7138de
ED
544 rcu_read_lock();
545 for_each_pmc_rcu(in_dev, pmc) {
1da177e4
LT
546 if (pmc->multiaddr == IGMP_ALL_HOSTS)
547 continue;
548 spin_lock_bh(&pmc->lock);
549 if (pmc->sfcount[MCAST_EXCLUDE])
550 type = IGMPV3_MODE_IS_EXCLUDE;
551 else
552 type = IGMPV3_MODE_IS_INCLUDE;
553 skb = add_grec(skb, pmc, type, 0, 0);
554 spin_unlock_bh(&pmc->lock);
555 }
1d7138de 556 rcu_read_unlock();
1da177e4
LT
557 } else {
558 spin_lock_bh(&pmc->lock);
559 if (pmc->sfcount[MCAST_EXCLUDE])
560 type = IGMPV3_MODE_IS_EXCLUDE;
561 else
562 type = IGMPV3_MODE_IS_INCLUDE;
563 skb = add_grec(skb, pmc, type, 0, 0);
564 spin_unlock_bh(&pmc->lock);
565 }
566 if (!skb)
567 return 0;
568 return igmpv3_sendpack(skb);
569}
570
571/*
572 * remove zero-count source records from a source filter list
573 */
574static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
575{
576 struct ip_sf_list *psf_prev, *psf_next, *psf;
577
578 psf_prev = NULL;
c71151f0 579 for (psf = *ppsf; psf; psf = psf_next) {
1da177e4
LT
580 psf_next = psf->sf_next;
581 if (psf->sf_crcount == 0) {
582 if (psf_prev)
583 psf_prev->sf_next = psf->sf_next;
584 else
585 *ppsf = psf->sf_next;
586 kfree(psf);
587 } else
588 psf_prev = psf;
589 }
590}
591
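/* Send IGMPv3 change reports for the interface: state-change records for
 * groups on the tomb (deleted) list first, then source-list and filter-mode
 * change records for current memberships, decrementing the per-group
 * retransmission counters as we go.
 */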
592static void igmpv3_send_cr(struct in_device *in_dev)
593{
594 struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
595 struct sk_buff *skb = NULL;
596 int type, dtype;
597
1d7138de 598 rcu_read_lock();
1da177e4
LT
599 spin_lock_bh(&in_dev->mc_tomb_lock);
600
601 /* deleted MCA's */
602 pmc_prev = NULL;
c71151f0 603 for (pmc = in_dev->mc_tomb; pmc; pmc = pmc_next) {
1da177e4
LT
604 pmc_next = pmc->next;
605 if (pmc->sfmode == MCAST_INCLUDE) {
606 type = IGMPV3_BLOCK_OLD_SOURCES;
607 dtype = IGMPV3_BLOCK_OLD_SOURCES;
608 skb = add_grec(skb, pmc, type, 1, 0);
609 skb = add_grec(skb, pmc, dtype, 1, 1);
610 }
611 if (pmc->crcount) {
1da177e4
LT
612 if (pmc->sfmode == MCAST_EXCLUDE) {
613 type = IGMPV3_CHANGE_TO_INCLUDE;
614 skb = add_grec(skb, pmc, type, 1, 0);
615 }
ad12583f 616 pmc->crcount--;
1da177e4
LT
617 if (pmc->crcount == 0) {
618 igmpv3_clear_zeros(&pmc->tomb);
619 igmpv3_clear_zeros(&pmc->sources);
620 }
621 }
622 if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) {
623 if (pmc_prev)
624 pmc_prev->next = pmc_next;
625 else
626 in_dev->mc_tomb = pmc_next;
627 in_dev_put(pmc->interface);
628 kfree(pmc);
629 } else
630 pmc_prev = pmc;
631 }
632 spin_unlock_bh(&in_dev->mc_tomb_lock);
633
634 /* change recs */
1d7138de 635 for_each_pmc_rcu(in_dev, pmc) {
1da177e4
LT
636 spin_lock_bh(&pmc->lock);
637 if (pmc->sfcount[MCAST_EXCLUDE]) {
638 type = IGMPV3_BLOCK_OLD_SOURCES;
639 dtype = IGMPV3_ALLOW_NEW_SOURCES;
640 } else {
641 type = IGMPV3_ALLOW_NEW_SOURCES;
642 dtype = IGMPV3_BLOCK_OLD_SOURCES;
643 }
644 skb = add_grec(skb, pmc, type, 0, 0);
645 skb = add_grec(skb, pmc, dtype, 0, 1); /* deleted sources */
646
647 /* filter mode changes */
648 if (pmc->crcount) {
1da177e4
LT
649 if (pmc->sfmode == MCAST_EXCLUDE)
650 type = IGMPV3_CHANGE_TO_EXCLUDE;
651 else
652 type = IGMPV3_CHANGE_TO_INCLUDE;
653 skb = add_grec(skb, pmc, type, 0, 0);
ad12583f 654 pmc->crcount--;
1da177e4
LT
655 }
656 spin_unlock_bh(&pmc->lock);
657 }
1d7138de 658 rcu_read_unlock();
1da177e4
LT
659
660 if (!skb)
661 return;
662 (void) igmpv3_sendpack(skb);
663}
664
665static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
666 int type)
667{
668 struct sk_buff *skb;
669 struct iphdr *iph;
670 struct igmphdr *ih;
671 struct rtable *rt;
672 struct net_device *dev = in_dev->dev;
877acedc 673 struct net *net = dev_net(dev);
63007727 674 __be32 group = pmc ? pmc->multiaddr : 0;
31e4543d 675 struct flowi4 fl4;
63007727 676 __be32 dst;
66088243 677 int hlen, tlen;
1da177e4
LT
678
679 if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
680 return igmpv3_send_report(in_dev, pmc);
681 else if (type == IGMP_HOST_LEAVE_MESSAGE)
682 dst = IGMP_ALL_ROUTER;
683 else
684 dst = group;
685
31e4543d 686 rt = ip_route_output_ports(net, &fl4, NULL, dst, 0,
78fbfd8a
DM
687 0, 0,
688 IPPROTO_IGMP, 0, dev->ifindex);
689 if (IS_ERR(rt))
690 return -1;
691
66088243
HX
692 hlen = LL_RESERVED_SPACE(dev);
693 tlen = dev->needed_tailroom;
694 skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
51456b29 695 if (!skb) {
1da177e4
LT
696 ip_rt_put(rt);
697 return -1;
698 }
9d4a0314 699 skb->priority = TC_PRIO_CONTROL;
1da177e4 700
d8d1f30b 701 skb_dst_set(skb, &rt->dst);
1da177e4 702
66088243 703 skb_reserve(skb, hlen);
1da177e4 704
7e28ecc2 705 skb_reset_network_header(skb);
eddc9ec5 706 iph = ip_hdr(skb);
7e28ecc2 707 skb_put(skb, sizeof(struct iphdr) + 4);
1da177e4
LT
708
709 iph->version = 4;
710 iph->ihl = (sizeof(struct iphdr)+4)>>2;
711 iph->tos = 0xc0;
712 iph->frag_off = htons(IP_DF);
713 iph->ttl = 1;
714 iph->daddr = dst;
492f64ce 715 iph->saddr = fl4.saddr;
1da177e4 716 iph->protocol = IPPROTO_IGMP;
b6a7719a 717 ip_select_ident(net, skb, NULL);
5e73ea1a
DB
718 ((u8 *)&iph[1])[0] = IPOPT_RA;
719 ((u8 *)&iph[1])[1] = 4;
720 ((u8 *)&iph[1])[2] = 0;
721 ((u8 *)&iph[1])[3] = 0;
1da177e4
LT
722
723 ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
a7e9ff73
JK
724 ih->type = type;
725 ih->code = 0;
726 ih->csum = 0;
727 ih->group = group;
728 ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
1da177e4 729
c439cb2e 730 return ip_local_out(skb);
1da177e4
LT
731}
732
733static void igmp_gq_timer_expire(unsigned long data)
734{
735 struct in_device *in_dev = (struct in_device *)data;
736
737 in_dev->mr_gq_running = 0;
738 igmpv3_send_report(in_dev, NULL);
e2401654 739 in_dev_put(in_dev);
1da177e4
LT
740}
741
742static void igmp_ifc_timer_expire(unsigned long data)
743{
744 struct in_device *in_dev = (struct in_device *)data;
745
746 igmpv3_send_cr(in_dev);
747 if (in_dev->mr_ifc_count) {
748 in_dev->mr_ifc_count--;
cab70040
WM
749 igmp_ifc_start_timer(in_dev,
750 unsolicited_report_interval(in_dev));
1da177e4 751 }
e2401654 752 in_dev_put(in_dev);
1da177e4
LT
753}
754
755static void igmp_ifc_event(struct in_device *in_dev)
756{
757 if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
758 return;
a9fe8e29 759 in_dev->mr_ifc_count = in_dev->mr_qrv ?: sysctl_igmp_qrv;
1da177e4
LT
760 igmp_ifc_start_timer(in_dev, 1);
761}
762
763
764static void igmp_timer_expire(unsigned long data)
765{
c71151f0 766 struct ip_mc_list *im = (struct ip_mc_list *)data;
1da177e4
LT
767 struct in_device *in_dev = im->interface;
768
769 spin_lock(&im->lock);
a7e9ff73 770 im->tm_running = 0;
1da177e4
LT
771
772 if (im->unsolicit_count) {
773 im->unsolicit_count--;
cab70040 774 igmp_start_timer(im, unsolicited_report_interval(in_dev));
1da177e4
LT
775 }
776 im->reporter = 1;
777 spin_unlock(&im->lock);
778
779 if (IGMP_V1_SEEN(in_dev))
780 igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
781 else if (IGMP_V2_SEEN(in_dev))
782 igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
783 else
784 igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);
785
786 ip_ma_put(im);
787}
788
ad12583f 789/* mark EXCLUDE-mode sources */
ea4d9e72 790static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
1da177e4
LT
791{
792 struct ip_sf_list *psf;
793 int i, scount;
794
ad12583f 795 scount = 0;
c71151f0 796 for (psf = pmc->sources; psf; psf = psf->sf_next) {
ad12583f
DS
797 if (scount == nsrcs)
798 break;
c71151f0 799 for (i = 0; i < nsrcs; i++) {
ad12583f 800 /* skip inactive filters */
e05c4ad3 801 if (psf->sf_count[MCAST_INCLUDE] ||
ad12583f
DS
802 pmc->sfcount[MCAST_EXCLUDE] !=
803 psf->sf_count[MCAST_EXCLUDE])
ce713ee5 804 break;
ad12583f
DS
805 if (srcs[i] == psf->sf_inaddr) {
806 scount++;
807 break;
808 }
809 }
810 }
811 pmc->gsquery = 0;
812 if (scount == nsrcs) /* all sources excluded */
813 return 0;
814 return 1;
815}
816
ea4d9e72 817static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
ad12583f
DS
818{
819 struct ip_sf_list *psf;
820 int i, scount;
821
822 if (pmc->sfmode == MCAST_EXCLUDE)
823 return igmp_xmarksources(pmc, nsrcs, srcs);
824
825 /* mark INCLUDE-mode sources */
1da177e4 826 scount = 0;
c71151f0 827 for (psf = pmc->sources; psf; psf = psf->sf_next) {
1da177e4
LT
828 if (scount == nsrcs)
829 break;
c71151f0 830 for (i = 0; i < nsrcs; i++)
1da177e4
LT
831 if (srcs[i] == psf->sf_inaddr) {
832 psf->sf_gsresp = 1;
833 scount++;
834 break;
835 }
836 }
ad12583f
DS
837 if (!scount) {
838 pmc->gsquery = 0;
839 return 0;
840 }
841 pmc->gsquery = 1;
842 return 1;
1da177e4
LT
843}
844
d679c532
ED
845/* return true if packet was dropped */
846static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
1da177e4
LT
847{
848 struct ip_mc_list *im;
849
850 /* Timers are only set for non-local groups */
851
852 if (group == IGMP_ALL_HOSTS)
d679c532 853 return false;
1da177e4 854
1d7138de
ED
855 rcu_read_lock();
856 for_each_pmc_rcu(in_dev, im) {
1da177e4
LT
857 if (im->multiaddr == group) {
858 igmp_stop_timer(im);
859 break;
860 }
861 }
1d7138de 862 rcu_read_unlock();
d679c532 863 return false;
1da177e4
LT
864}
865
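/* Process a received membership query.  An 8-byte query is IGMPv1 (code 0)
 * or IGMPv2; anything shorter than 12 bytes is bogus; everything else is
 * treated as IGMPv3, possibly carrying a source list.  Seeing a v1/v2
 * querier (re)starts the corresponding "router present" timeout on the
 * interface.
 */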
866/* return true if packet was dropped */
867static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
1da177e4
LT
868 int len)
869{
d9edf9e2
ACM
870 struct igmphdr *ih = igmp_hdr(skb);
871 struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
1da177e4 872 struct ip_mc_list *im;
63007727 873 __be32 group = ih->group;
1da177e4
LT
874 int max_delay;
875 int mark = 0;
876
877
5b7c8406 878 if (len == 8) {
1da177e4
LT
879 if (ih->code == 0) {
880 /* Alas, old v1 router presents here. */
e905a9ed 881
436f7c20 882 max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
1da177e4 883 in_dev->mr_v1_seen = jiffies +
436f7c20 884 IGMP_V1_ROUTER_PRESENT_TIMEOUT;
1da177e4
LT
885 group = 0;
886 } else {
887 /* v2 router present */
888 max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
889 in_dev->mr_v2_seen = jiffies +
436f7c20 890 IGMP_V2_ROUTER_PRESENT_TIMEOUT;
1da177e4
LT
891 }
892 /* cancel the interface change timer */
893 in_dev->mr_ifc_count = 0;
894 if (del_timer(&in_dev->mr_ifc_timer))
895 __in_dev_put(in_dev);
896 /* clear deleted report items */
897 igmpv3_clear_delrec(in_dev);
898 } else if (len < 12) {
d679c532 899 return true; /* ignore bogus packet; freed by caller */
5b7c8406
DS
900 } else if (IGMP_V1_SEEN(in_dev)) {
901 /* This is a v3 query with v1 queriers present */
436f7c20 902 max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
5b7c8406
DS
903 group = 0;
904 } else if (IGMP_V2_SEEN(in_dev)) {
905 /* this is a v3 query with v2 queriers present;
906 * Interpretation of the max_delay code is problematic here.
907 * A real v2 host would use ih_code directly, while v3 has a
908 * different encoding. We use the v3 encoding as more likely
909 * to be intended in a v3 query.
910 */
911 max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
a8c1f65c
BH
912 if (!max_delay)
913 max_delay = 1; /* can't mod w/ 0 */
1da177e4
LT
914 } else { /* v3 */
915 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
d679c532 916 return true;
e905a9ed 917
d9edf9e2 918 ih3 = igmpv3_query_hdr(skb);
1da177e4 919 if (ih3->nsrcs) {
e905a9ed 920 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
63007727 921 + ntohs(ih3->nsrcs)*sizeof(__be32)))
d679c532 922 return true;
d9edf9e2 923 ih3 = igmpv3_query_hdr(skb);
1da177e4
LT
924 }
925
926 max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
927 if (!max_delay)
928 max_delay = 1; /* can't mod w/ 0 */
929 in_dev->mr_maxdelay = max_delay;
930 if (ih3->qrv)
931 in_dev->mr_qrv = ih3->qrv;
932 if (!group) { /* general query */
933 if (ih3->nsrcs)
b47bd8d2 934 return true; /* no sources allowed */
1da177e4 935 igmp_gq_start_timer(in_dev);
d679c532 936 return false;
1da177e4
LT
937 }
938 /* mark sources to include, if group & source-specific */
939 mark = ih3->nsrcs != 0;
940 }
941
942 /*
943 * - Start the timers in all of our membership records
944 * that the query applies to for the interface on
945 * which the query arrived excl. those that belong
946 * to a "local" group (224.0.0.X)
947 * - For timers already running check if they need to
948 * be reset.
949 * - Use the igmp->igmp_code field as the maximum
950 * delay possible
951 */
1d7138de
ED
952 rcu_read_lock();
953 for_each_pmc_rcu(in_dev, im) {
ad12583f
DS
954 int changed;
955
1da177e4
LT
956 if (group && group != im->multiaddr)
957 continue;
958 if (im->multiaddr == IGMP_ALL_HOSTS)
959 continue;
960 spin_lock_bh(&im->lock);
961 if (im->tm_running)
962 im->gsquery = im->gsquery && mark;
963 else
964 im->gsquery = mark;
ad12583f 965 changed = !im->gsquery ||
e905a9ed 966 igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
1da177e4 967 spin_unlock_bh(&im->lock);
ad12583f
DS
968 if (changed)
969 igmp_mod_timer(im, max_delay);
1da177e4 970 }
1d7138de 971 rcu_read_unlock();
d679c532 972 return false;
1da177e4
LT
973}
974
9a57a9d2 975/* called in rcu_read_lock() section */
1da177e4
LT
976int igmp_rcv(struct sk_buff *skb)
977{
978 /* This basically follows the spec line by line -- see RFC1112 */
979 struct igmphdr *ih;
9a57a9d2 980 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
1da177e4 981 int len = skb->len;
d679c532 982 bool dropped = true;
1da177e4 983
51456b29 984 if (!in_dev)
cd557bc1 985 goto drop;
1da177e4 986
fb286bb2 987 if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
9a57a9d2 988 goto drop;
fb286bb2 989
de08dc1a
TH
990 if (skb_checksum_simple_validate(skb))
991 goto drop;
1da177e4 992
d9edf9e2 993 ih = igmp_hdr(skb);
1da177e4
LT
994 switch (ih->type) {
995 case IGMP_HOST_MEMBERSHIP_QUERY:
d679c532 996 dropped = igmp_heard_query(in_dev, skb, len);
1da177e4
LT
997 break;
998 case IGMP_HOST_MEMBERSHIP_REPORT:
999 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1da177e4 1000 /* Is it our report looped back? */
c7537967 1001 if (rt_is_output_route(skb_rtable(skb)))
1da177e4 1002 break;
24c69275
DS
1003 /* don't rely on MC router hearing unicast reports */
1004 if (skb->pkt_type == PACKET_MULTICAST ||
1005 skb->pkt_type == PACKET_BROADCAST)
d679c532 1006 dropped = igmp_heard_report(in_dev, ih->group);
1da177e4
LT
1007 break;
1008 case IGMP_PIM:
1009#ifdef CONFIG_IP_PIMSM_V1
1da177e4
LT
1010 return pim_rcv_v1(skb);
1011#endif
c6b471e6 1012 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1da177e4
LT
1013 case IGMP_DVMRP:
1014 case IGMP_TRACE:
1015 case IGMP_HOST_LEAVE_MESSAGE:
1016 case IGMP_MTRACE:
1017 case IGMP_MTRACE_RESP:
1018 break;
1019 default:
dd1c1853 1020 break;
1da177e4 1021 }
fb286bb2 1022
cd557bc1 1023drop:
d679c532
ED
1024 if (dropped)
1025 kfree_skb(skb);
1026 else
1027 consume_skb(skb);
1da177e4
LT
1028 return 0;
1029}
1030
1031#endif
1032
1033
1034/*
1035 * Add a filter to a device
1036 */
1037
63007727 1038static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
1da177e4
LT
1039{
1040 char buf[MAX_ADDR_LEN];
1041 struct net_device *dev = in_dev->dev;
1042
1043 /* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
1044 We will get multicast token leakage, when IFF_MULTICAST
b81693d9 1045 is changed. This check should be done in ndo_set_rx_mode
1da177e4
LT
1046 routine. Something sort of:
1047 if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
1048 --ANK
1049 */
1050 if (arp_mc_map(addr, buf, dev, 0) == 0)
22bedad3 1051 dev_mc_add(dev, buf);
1da177e4
LT
1052}
1053
1054/*
1055 * Remove a filter from a device
1056 */
1057
63007727 1058static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
1da177e4
LT
1059{
1060 char buf[MAX_ADDR_LEN];
1061 struct net_device *dev = in_dev->dev;
1062
1063 if (arp_mc_map(addr, buf, dev, 0) == 0)
22bedad3 1064 dev_mc_del(dev, buf);
1da177e4
LT
1065}
1066
1067#ifdef CONFIG_IP_MULTICAST
1068/*
1069 * deleted ip_mc_list manipulation
1070 */
1071static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1072{
1073 struct ip_mc_list *pmc;
1074
1075 /* this is an "ip_mc_list" for convenience; only the fields below
1076 * are actually used. In particular, the refcnt and users are not
1077 * used for management of the delete list. Using the same structure
1078 * for deleted items allows change reports to use common code with
1079 * non-deleted or query-response MCA's.
1080 */
0da974f4 1081 pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
1da177e4
LT
1082 if (!pmc)
1083 return;
1da177e4
LT
1084 spin_lock_bh(&im->lock);
1085 pmc->interface = im->interface;
1086 in_dev_hold(in_dev);
1087 pmc->multiaddr = im->multiaddr;
a9fe8e29 1088 pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
1da177e4
LT
1089 pmc->sfmode = im->sfmode;
1090 if (pmc->sfmode == MCAST_INCLUDE) {
1091 struct ip_sf_list *psf;
1092
1093 pmc->tomb = im->tomb;
1094 pmc->sources = im->sources;
1095 im->tomb = im->sources = NULL;
c71151f0 1096 for (psf = pmc->sources; psf; psf = psf->sf_next)
1da177e4
LT
1097 psf->sf_crcount = pmc->crcount;
1098 }
1099 spin_unlock_bh(&im->lock);
1100
1101 spin_lock_bh(&in_dev->mc_tomb_lock);
1102 pmc->next = in_dev->mc_tomb;
1103 in_dev->mc_tomb = pmc;
1104 spin_unlock_bh(&in_dev->mc_tomb_lock);
1105}
1106
63007727 1107static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
1da177e4
LT
1108{
1109 struct ip_mc_list *pmc, *pmc_prev;
1110 struct ip_sf_list *psf, *psf_next;
1111
1112 spin_lock_bh(&in_dev->mc_tomb_lock);
1113 pmc_prev = NULL;
c71151f0 1114 for (pmc = in_dev->mc_tomb; pmc; pmc = pmc->next) {
1da177e4
LT
1115 if (pmc->multiaddr == multiaddr)
1116 break;
1117 pmc_prev = pmc;
1118 }
1119 if (pmc) {
1120 if (pmc_prev)
1121 pmc_prev->next = pmc->next;
1122 else
1123 in_dev->mc_tomb = pmc->next;
1124 }
1125 spin_unlock_bh(&in_dev->mc_tomb_lock);
1126 if (pmc) {
c71151f0 1127 for (psf = pmc->tomb; psf; psf = psf_next) {
1da177e4
LT
1128 psf_next = psf->sf_next;
1129 kfree(psf);
1130 }
1131 in_dev_put(pmc->interface);
1132 kfree(pmc);
1133 }
1134}
1135
1136static void igmpv3_clear_delrec(struct in_device *in_dev)
1137{
1138 struct ip_mc_list *pmc, *nextpmc;
1139
1140 spin_lock_bh(&in_dev->mc_tomb_lock);
1141 pmc = in_dev->mc_tomb;
1142 in_dev->mc_tomb = NULL;
1143 spin_unlock_bh(&in_dev->mc_tomb_lock);
1144
1145 for (; pmc; pmc = nextpmc) {
1146 nextpmc = pmc->next;
1147 ip_mc_clear_src(pmc);
1148 in_dev_put(pmc->interface);
1149 kfree(pmc);
1150 }
1151 /* clear dead sources, too */
1d7138de
ED
1152 rcu_read_lock();
1153 for_each_pmc_rcu(in_dev, pmc) {
1da177e4
LT
1154 struct ip_sf_list *psf, *psf_next;
1155
1156 spin_lock_bh(&pmc->lock);
1157 psf = pmc->tomb;
1158 pmc->tomb = NULL;
1159 spin_unlock_bh(&pmc->lock);
c71151f0 1160 for (; psf; psf = psf_next) {
1da177e4
LT
1161 psf_next = psf->sf_next;
1162 kfree(psf);
1163 }
1164 }
1d7138de 1165 rcu_read_unlock();
1da177e4
LT
1166}
1167#endif
1168
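/* A membership is going away: drop the link-layer filter entry.  For
 * non-local groups, routers are notified as well: nothing is sent if a v1
 * querier is present; an IGMPv2 leave is sent if a v2 querier is present
 * and we were the last reporter; otherwise an IGMPv3 state-change report
 * is queued via the interface-change timer.
 */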
1169static void igmp_group_dropped(struct ip_mc_list *im)
1170{
1171 struct in_device *in_dev = im->interface;
1172#ifdef CONFIG_IP_MULTICAST
1173 int reporter;
1174#endif
1175
1176 if (im->loaded) {
1177 im->loaded = 0;
1178 ip_mc_filter_del(in_dev, im->multiaddr);
1179 }
1180
1181#ifdef CONFIG_IP_MULTICAST
1182 if (im->multiaddr == IGMP_ALL_HOSTS)
1183 return;
1184
1185 reporter = im->reporter;
1186 igmp_stop_timer(im);
1187
1188 if (!in_dev->dead) {
1189 if (IGMP_V1_SEEN(in_dev))
24cf3af3 1190 return;
1da177e4
LT
1191 if (IGMP_V2_SEEN(in_dev)) {
1192 if (reporter)
1193 igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
24cf3af3 1194 return;
1da177e4
LT
1195 }
1196 /* IGMPv3 */
1197 igmpv3_add_delrec(in_dev, im);
1198
1199 igmp_ifc_event(in_dev);
1200 }
1da177e4 1201#endif
1da177e4
LT
1202}
1203
1204static void igmp_group_added(struct ip_mc_list *im)
1205{
1206 struct in_device *in_dev = im->interface;
1207
1208 if (im->loaded == 0) {
1209 im->loaded = 1;
1210 ip_mc_filter_add(in_dev, im->multiaddr);
1211 }
1212
1213#ifdef CONFIG_IP_MULTICAST
1214 if (im->multiaddr == IGMP_ALL_HOSTS)
1215 return;
1216
1217 if (in_dev->dead)
1218 return;
1219 if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
1220 spin_lock_bh(&im->lock);
436f7c20 1221 igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
1da177e4
LT
1222 spin_unlock_bh(&im->lock);
1223 return;
1224 }
1225 /* else, v3 */
1226
a9fe8e29 1227 im->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
1da177e4
LT
1228 igmp_ifc_event(in_dev);
1229#endif
1230}
1231
1232
1233/*
1234 * Multicast list managers
1235 */
1236
e9897071
ED
1237static u32 ip_mc_hash(const struct ip_mc_list *im)
1238{
c70eba74 1239 return hash_32((__force u32)im->multiaddr, MC_HASH_SZ_LOG);
e9897071
ED
1240}
1241
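/* Insert im into the per-device multicast hash.  The hash table itself is
 * created lazily, only once the interface holds at least four memberships;
 * until then lookups simply walk the linked list.
 */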
1242static void ip_mc_hash_add(struct in_device *in_dev,
1243 struct ip_mc_list *im)
1244{
1245 struct ip_mc_list __rcu **mc_hash;
1246 u32 hash;
1247
1248 mc_hash = rtnl_dereference(in_dev->mc_hash);
1249 if (mc_hash) {
1250 hash = ip_mc_hash(im);
c70eba74 1251 im->next_hash = mc_hash[hash];
e9897071
ED
1252 rcu_assign_pointer(mc_hash[hash], im);
1253 return;
1254 }
1255
1256 /* do not use a hash table for small number of items */
1257 if (in_dev->mc_count < 4)
1258 return;
1259
1260 mc_hash = kzalloc(sizeof(struct ip_mc_list *) << MC_HASH_SZ_LOG,
1261 GFP_KERNEL);
1262 if (!mc_hash)
1263 return;
1264
1265 for_each_pmc_rtnl(in_dev, im) {
1266 hash = ip_mc_hash(im);
c70eba74 1267 im->next_hash = mc_hash[hash];
e9897071
ED
1268 RCU_INIT_POINTER(mc_hash[hash], im);
1269 }
1270
1271 rcu_assign_pointer(in_dev->mc_hash, mc_hash);
1272}
1273
1274static void ip_mc_hash_remove(struct in_device *in_dev,
1275 struct ip_mc_list *im)
1276{
1277 struct ip_mc_list __rcu **mc_hash = rtnl_dereference(in_dev->mc_hash);
1278 struct ip_mc_list *aux;
1279
1280 if (!mc_hash)
1281 return;
1282 mc_hash += ip_mc_hash(im);
1283 while ((aux = rtnl_dereference(*mc_hash)) != im)
1284 mc_hash = &aux->next_hash;
1285 *mc_hash = im->next_hash;
1286}
1287
1da177e4
LT
1288
1289/*
1290 * A socket has joined a multicast group on device dev.
1291 */
1292
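/* New memberships start in (EXCLUDE, empty source list) mode, i.e. they
 * accept traffic from any source; the first unsolicited reports are
 * scheduled from igmp_group_added().
 */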
8f935bbd 1293void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1da177e4
LT
1294{
1295 struct ip_mc_list *im;
1296
1297 ASSERT_RTNL();
1298
1d7138de 1299 for_each_pmc_rtnl(in_dev, im) {
1da177e4
LT
1300 if (im->multiaddr == addr) {
1301 im->users++;
1302 ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
1303 goto out;
1304 }
1305 }
1306
1d7138de 1307 im = kzalloc(sizeof(*im), GFP_KERNEL);
1da177e4
LT
1308 if (!im)
1309 goto out;
1310
a7e9ff73
JK
1311 im->users = 1;
1312 im->interface = in_dev;
1da177e4 1313 in_dev_hold(in_dev);
a7e9ff73 1314 im->multiaddr = addr;
1da177e4
LT
1315 /* initial mode is (EX, empty) */
1316 im->sfmode = MCAST_EXCLUDE;
1da177e4 1317 im->sfcount[MCAST_EXCLUDE] = 1;
1da177e4
LT
1318 atomic_set(&im->refcnt, 1);
1319 spin_lock_init(&im->lock);
1320#ifdef CONFIG_IP_MULTICAST
179542a5 1321 setup_timer(&im->timer, igmp_timer_expire, (unsigned long)im);
a9fe8e29 1322 im->unsolicit_count = sysctl_igmp_qrv;
1da177e4 1323#endif
1d7138de
ED
1324
1325 im->next_rcu = in_dev->mc_list;
b8bae41e 1326 in_dev->mc_count++;
cf778b00 1327 rcu_assign_pointer(in_dev->mc_list, im);
1d7138de 1328
e9897071
ED
1329 ip_mc_hash_add(in_dev, im);
1330
1da177e4
LT
1331#ifdef CONFIG_IP_MULTICAST
1332 igmpv3_del_delrec(in_dev, im->multiaddr);
1333#endif
1334 igmp_group_added(im);
1335 if (!in_dev->dead)
1336 ip_rt_multicast_event(in_dev);
1337out:
1338 return;
1339}
4bc2f18b 1340EXPORT_SYMBOL(ip_mc_inc_group);
1da177e4 1341
9afd85c9
LL
1342static int ip_mc_check_iphdr(struct sk_buff *skb)
1343{
1344 const struct iphdr *iph;
1345 unsigned int len;
1346 unsigned int offset = skb_network_offset(skb) + sizeof(*iph);
1347
1348 if (!pskb_may_pull(skb, offset))
1349 return -EINVAL;
1350
1351 iph = ip_hdr(skb);
1352
1353 if (iph->version != 4 || ip_hdrlen(skb) < sizeof(*iph))
1354 return -EINVAL;
1355
1356 offset += ip_hdrlen(skb) - sizeof(*iph);
1357
1358 if (!pskb_may_pull(skb, offset))
1359 return -EINVAL;
1360
1361 iph = ip_hdr(skb);
1362
1363 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
1364 return -EINVAL;
1365
1366 len = skb_network_offset(skb) + ntohs(iph->tot_len);
1367 if (skb->len < len || len < offset)
1368 return -EINVAL;
1369
1370 skb_set_transport_header(skb, offset);
1371
1372 return 0;
1373}
1374
1375static int ip_mc_check_igmp_reportv3(struct sk_buff *skb)
1376{
1377 unsigned int len = skb_transport_offset(skb);
1378
1379 len += sizeof(struct igmpv3_report);
1380
1381 return pskb_may_pull(skb, len) ? 0 : -EINVAL;
1382}
1383
1384static int ip_mc_check_igmp_query(struct sk_buff *skb)
1385{
1386 unsigned int len = skb_transport_offset(skb);
1387
1388 len += sizeof(struct igmphdr);
1389 if (skb->len < len)
1390 return -EINVAL;
1391
1392 /* IGMPv{1,2}? */
1393 if (skb->len != len) {
1394 /* or IGMPv3? */
1395 len += sizeof(struct igmpv3_query) - sizeof(struct igmphdr);
1396 if (skb->len < len || !pskb_may_pull(skb, len))
1397 return -EINVAL;
1398 }
1399
1400 /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
1401 * all-systems destination addresses (224.0.0.1) for general queries
1402 */
1403 if (!igmp_hdr(skb)->group &&
1404 ip_hdr(skb)->daddr != htonl(INADDR_ALLHOSTS_GROUP))
1405 return -EINVAL;
1406
1407 return 0;
1408}
1409
1410static int ip_mc_check_igmp_msg(struct sk_buff *skb)
1411{
1412 switch (igmp_hdr(skb)->type) {
1413 case IGMP_HOST_LEAVE_MESSAGE:
1414 case IGMP_HOST_MEMBERSHIP_REPORT:
1415 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1416 /* fall through */
1417 return 0;
1418 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1419 return ip_mc_check_igmp_reportv3(skb);
1420 case IGMP_HOST_MEMBERSHIP_QUERY:
1421 return ip_mc_check_igmp_query(skb);
1422 default:
1423 return -ENOMSG;
1424 }
1425}
1426
1427static inline __sum16 ip_mc_validate_checksum(struct sk_buff *skb)
1428{
1429 return skb_checksum_simple_validate(skb);
1430}
1431
1432static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
1433
1434{
1435 struct sk_buff *skb_chk;
1436 unsigned int transport_len;
1437 unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
1438 int ret;
1439
1440 transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
1441
1442 skb_get(skb);
1443 skb_chk = skb_checksum_trimmed(skb, transport_len,
1444 ip_mc_validate_checksum);
1445 if (!skb_chk)
1446 return -EINVAL;
1447
1448 if (!pskb_may_pull(skb_chk, len)) {
1449 kfree_skb(skb_chk);
1450 return -EINVAL;
1451 }
1452
1453 ret = ip_mc_check_igmp_msg(skb_chk);
1454 if (ret) {
1455 kfree_skb(skb_chk);
1456 return ret;
1457 }
1458
1459 if (skb_trimmed)
1460 *skb_trimmed = skb_chk;
1461 else
1462 kfree_skb(skb_chk);
1463
1464 return 0;
1465}
1466
1467/**
1468 * ip_mc_check_igmp - checks whether this is a sane IGMP packet
1469 * @skb: the skb to validate
1470 * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
1471 *
1472 * Checks whether an IPv4 packet is a valid IGMP packet. If so sets
1473 * skb network and transport headers accordingly and returns zero.
1474 *
1475 * -EINVAL: A broken packet was detected, i.e. it violates some internet
1476 * standard
1477 * -ENOMSG: IP header validation succeeded but it is not an IGMP packet.
1478 * -ENOMEM: A memory allocation failure happened.
1479 *
1480 * Optionally, an skb pointer might be provided via skb_trimmed (or set it
1481 * to NULL): After parsing an IGMP packet successfully it will point to
1482 * an skb which has its tail aligned to the IP packet end. This might
1483 * either be the originally provided skb or a trimmed, cloned version if
1484 * the skb frame had data beyond the IP packet. A cloned skb allows us
1485 * to leave the original skb and its full frame unchanged (which might be
1486 * desirable for layer 2 frame jugglers).
1487 *
1488 * The caller needs to release a reference count from any returned skb_trimmed.
1489 */
1490int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
1491{
1492 int ret = ip_mc_check_iphdr(skb);
1493
1494 if (ret < 0)
1495 return ret;
1496
1497 if (ip_hdr(skb)->protocol != IPPROTO_IGMP)
1498 return -ENOMSG;
1499
1500 return __ip_mc_check_igmp(skb, skb_trimmed);
1501}
1502EXPORT_SYMBOL(ip_mc_check_igmp);
1503
a816c7c7 1504/*
4aa5dee4 1505 * Resend IGMP JOIN report; used by netdev notifier.
a816c7c7 1506 */
4aa5dee4 1507static void ip_mc_rejoin_groups(struct in_device *in_dev)
a816c7c7 1508{
08882669 1509#ifdef CONFIG_IP_MULTICAST
866f3b25
ED
1510 struct ip_mc_list *im;
1511 int type;
a816c7c7 1512
4aa5dee4
JP
1513 ASSERT_RTNL();
1514
1515 for_each_pmc_rtnl(in_dev, im) {
866f3b25
ED
1516 if (im->multiaddr == IGMP_ALL_HOSTS)
1517 continue;
a816c7c7 1518
866f3b25
ED
1519 /* a failover is happening and switches
1520 * must be notified immediately
1521 */
1522 if (IGMP_V1_SEEN(in_dev))
1523 type = IGMP_HOST_MEMBERSHIP_REPORT;
1524 else if (IGMP_V2_SEEN(in_dev))
1525 type = IGMPV2_HOST_MEMBERSHIP_REPORT;
1526 else
1527 type = IGMPV3_HOST_MEMBERSHIP_REPORT;
1528 igmp_send_report(in_dev, im, type);
1529 }
a816c7c7
JV
1530#endif
1531}
1532
1da177e4
LT
1533/*
1534 * A socket has left a multicast group on device dev
1535 */
1536
8f935bbd 1537void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
1da177e4 1538{
1d7138de
ED
1539 struct ip_mc_list *i;
1540 struct ip_mc_list __rcu **ip;
e905a9ed 1541
1da177e4 1542 ASSERT_RTNL();
e905a9ed 1543
1d7138de
ED
1544 for (ip = &in_dev->mc_list;
1545 (i = rtnl_dereference(*ip)) != NULL;
1546 ip = &i->next_rcu) {
a7e9ff73 1547 if (i->multiaddr == addr) {
1da177e4 1548 if (--i->users == 0) {
e9897071 1549 ip_mc_hash_remove(in_dev, i);
1d7138de 1550 *ip = i->next_rcu;
b8bae41e 1551 in_dev->mc_count--;
1da177e4 1552 igmp_group_dropped(i);
24cf3af3 1553 ip_mc_clear_src(i);
1da177e4
LT
1554
1555 if (!in_dev->dead)
1556 ip_rt_multicast_event(in_dev);
1557
1558 ip_ma_put(i);
1559 return;
1560 }
1561 break;
1562 }
1563 }
1564}
4bc2f18b 1565EXPORT_SYMBOL(ip_mc_dec_group);
1da177e4 1566
75c78500
MS
1567/* Device changing type */
1568
1569void ip_mc_unmap(struct in_device *in_dev)
1570{
1d7138de 1571 struct ip_mc_list *pmc;
75c78500
MS
1572
1573 ASSERT_RTNL();
1574
1d7138de
ED
1575 for_each_pmc_rtnl(in_dev, pmc)
1576 igmp_group_dropped(pmc);
75c78500
MS
1577}
1578
1579void ip_mc_remap(struct in_device *in_dev)
1580{
1d7138de 1581 struct ip_mc_list *pmc;
75c78500
MS
1582
1583 ASSERT_RTNL();
1584
1d7138de
ED
1585 for_each_pmc_rtnl(in_dev, pmc)
1586 igmp_group_added(pmc);
75c78500
MS
1587}
1588
1da177e4
LT
1589/* Device going down */
1590
1591void ip_mc_down(struct in_device *in_dev)
1592{
1d7138de 1593 struct ip_mc_list *pmc;
1da177e4
LT
1594
1595 ASSERT_RTNL();
1596
1d7138de
ED
1597 for_each_pmc_rtnl(in_dev, pmc)
1598 igmp_group_dropped(pmc);
1da177e4
LT
1599
1600#ifdef CONFIG_IP_MULTICAST
1601 in_dev->mr_ifc_count = 0;
1602 if (del_timer(&in_dev->mr_ifc_timer))
1603 __in_dev_put(in_dev);
1604 in_dev->mr_gq_running = 0;
1605 if (del_timer(&in_dev->mr_gq_timer))
1606 __in_dev_put(in_dev);
1607 igmpv3_clear_delrec(in_dev);
1608#endif
1609
1610 ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
1611}
1612
1613void ip_mc_init_dev(struct in_device *in_dev)
1614{
1615 ASSERT_RTNL();
1616
1da177e4 1617#ifdef CONFIG_IP_MULTICAST
b24b8a24
PE
1618 setup_timer(&in_dev->mr_gq_timer, igmp_gq_timer_expire,
1619 (unsigned long)in_dev);
b24b8a24
PE
1620 setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire,
1621 (unsigned long)in_dev);
a9fe8e29 1622 in_dev->mr_qrv = sysctl_igmp_qrv;
1da177e4
LT
1623#endif
1624
1da177e4
LT
1625 spin_lock_init(&in_dev->mc_tomb_lock);
1626}
1627
1628/* Device going up */
1629
1630void ip_mc_up(struct in_device *in_dev)
1631{
1d7138de 1632 struct ip_mc_list *pmc;
1da177e4
LT
1633
1634 ASSERT_RTNL();
1635
a9fe8e29
HFS
1636#ifdef CONFIG_IP_MULTICAST
1637 in_dev->mr_qrv = sysctl_igmp_qrv;
1638#endif
1da177e4
LT
1639 ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
1640
1d7138de
ED
1641 for_each_pmc_rtnl(in_dev, pmc)
1642 igmp_group_added(pmc);
1da177e4
LT
1643}
1644
1645/*
1646 * Device is about to be destroyed: clean up.
1647 */
1648
1649void ip_mc_destroy_dev(struct in_device *in_dev)
1650{
1651 struct ip_mc_list *i;
1652
1653 ASSERT_RTNL();
1654
1655 /* Deactivate timers */
1656 ip_mc_down(in_dev);
1657
1d7138de
ED
1658 while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
1659 in_dev->mc_list = i->next_rcu;
b8bae41e 1660 in_dev->mc_count--;
1d7138de 1661
24cf3af3
VF
1662 /* We've dropped the groups in ip_mc_down already */
1663 ip_mc_clear_src(i);
1da177e4 1664 ip_ma_put(i);
1da177e4 1665 }
1da177e4
LT
1666}
1667
9e917dca 1668/* RTNL is locked */
877acedc 1669static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
1da177e4 1670{
1da177e4
LT
1671 struct net_device *dev = NULL;
1672 struct in_device *idev = NULL;
1673
1674 if (imr->imr_ifindex) {
877acedc 1675 idev = inetdev_by_index(net, imr->imr_ifindex);
1da177e4
LT
1676 return idev;
1677 }
1678 if (imr->imr_address.s_addr) {
9e917dca 1679 dev = __ip_dev_find(net, imr->imr_address.s_addr, false);
1da177e4
LT
1680 if (!dev)
1681 return NULL;
1da177e4
LT
1682 }
1683
b23dd4fe 1684 if (!dev) {
78fbfd8a
DM
1685 struct rtable *rt = ip_route_output(net,
1686 imr->imr_multiaddr.s_addr,
1687 0, 0, 0);
b23dd4fe
DM
1688 if (!IS_ERR(rt)) {
1689 dev = rt->dst.dev;
1690 ip_rt_put(rt);
1691 }
1da177e4
LT
1692 }
1693 if (dev) {
1694 imr->imr_ifindex = dev->ifindex;
e5ed6399 1695 idev = __in_dev_get_rtnl(dev);
1da177e4
LT
1696 }
1697 return idev;
1698}
1699
1700/*
1701 * Join a socket to a group
1702 */
ab32ea5d
BH
1703int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS;
1704int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF;
a9fe8e29 1705#ifdef CONFIG_IP_MULTICAST
436f7c20 1706int sysctl_igmp_qrv __read_mostly = IGMP_QUERY_ROBUSTNESS_VARIABLE;
a9fe8e29 1707#endif
1da177e4
LT
1708
1709static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
8f935bbd 1710 __be32 *psfsrc)
1da177e4
LT
1711{
1712 struct ip_sf_list *psf, *psf_prev;
1713 int rv = 0;
1714
1715 psf_prev = NULL;
c71151f0 1716 for (psf = pmc->sources; psf; psf = psf->sf_next) {
1da177e4
LT
1717 if (psf->sf_inaddr == *psfsrc)
1718 break;
1719 psf_prev = psf;
1720 }
1721 if (!psf || psf->sf_count[sfmode] == 0) {
1722 /* source filter not found, or count wrong => bug */
1723 return -ESRCH;
1724 }
1725 psf->sf_count[sfmode]--;
1726 if (psf->sf_count[sfmode] == 0) {
1727 ip_rt_multicast_event(pmc->interface);
1728 }
1729 if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
1730#ifdef CONFIG_IP_MULTICAST
1731 struct in_device *in_dev = pmc->interface;
1732#endif
1733
1734 /* no more filters for this source */
1735 if (psf_prev)
1736 psf_prev->sf_next = psf->sf_next;
1737 else
1738 pmc->sources = psf->sf_next;
1739#ifdef CONFIG_IP_MULTICAST
1740 if (psf->sf_oldin &&
1741 !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
a9fe8e29 1742 psf->sf_crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
1da177e4
LT
1743 psf->sf_next = pmc->tomb;
1744 pmc->tomb = psf;
1745 rv = 1;
1746 } else
1747#endif
1748 kfree(psf);
1749 }
1750 return rv;
1751}
1752
1753#ifndef CONFIG_IP_MULTICAST
1754#define igmp_ifc_event(x) do { } while (0)
1755#endif
1756
8f935bbd
AV
1757static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1758 int sfcount, __be32 *psfsrc, int delta)
1da177e4
LT
1759{
1760 struct ip_mc_list *pmc;
1761 int changerec = 0;
1762 int i, err;
1763
1764 if (!in_dev)
1765 return -ENODEV;
1d7138de
ED
1766 rcu_read_lock();
1767 for_each_pmc_rcu(in_dev, pmc) {
1da177e4
LT
1768 if (*pmca == pmc->multiaddr)
1769 break;
1770 }
1771 if (!pmc) {
1772 /* MCA not found?? bug */
1d7138de 1773 rcu_read_unlock();
1da177e4
LT
1774 return -ESRCH;
1775 }
1776 spin_lock_bh(&pmc->lock);
1d7138de 1777 rcu_read_unlock();
1da177e4
LT
1778#ifdef CONFIG_IP_MULTICAST
1779 sf_markstate(pmc);
1780#endif
1781 if (!delta) {
1782 err = -EINVAL;
1783 if (!pmc->sfcount[sfmode])
1784 goto out_unlock;
1785 pmc->sfcount[sfmode]--;
1786 }
1787 err = 0;
c71151f0 1788 for (i = 0; i < sfcount; i++) {
1da177e4
LT
1789 int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);
1790
1791 changerec |= rv > 0;
1792 if (!err && rv < 0)
1793 err = rv;
1794 }
1795 if (pmc->sfmode == MCAST_EXCLUDE &&
1796 pmc->sfcount[MCAST_EXCLUDE] == 0 &&
1797 pmc->sfcount[MCAST_INCLUDE]) {
1798#ifdef CONFIG_IP_MULTICAST
1799 struct ip_sf_list *psf;
1800#endif
1801
1802 /* filter mode change */
1803 pmc->sfmode = MCAST_INCLUDE;
1804#ifdef CONFIG_IP_MULTICAST
a9fe8e29 1805 pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
1da177e4 1806 in_dev->mr_ifc_count = pmc->crcount;
c71151f0 1807 for (psf = pmc->sources; psf; psf = psf->sf_next)
1da177e4
LT
1808 psf->sf_crcount = 0;
1809 igmp_ifc_event(pmc->interface);
1810 } else if (sf_setstate(pmc) || changerec) {
1811 igmp_ifc_event(pmc->interface);
1812#endif
1813 }
1814out_unlock:
1815 spin_unlock_bh(&pmc->lock);
1816 return err;
1817}
1818
1819/*
1820 * Add multicast single-source filter to the interface list
1821 */
1822static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
5eb81e89 1823 __be32 *psfsrc)
1da177e4
LT
1824{
1825 struct ip_sf_list *psf, *psf_prev;
1826
1827 psf_prev = NULL;
c71151f0 1828 for (psf = pmc->sources; psf; psf = psf->sf_next) {
1da177e4
LT
1829 if (psf->sf_inaddr == *psfsrc)
1830 break;
1831 psf_prev = psf;
1832 }
1833 if (!psf) {
0da974f4 1834 psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
1da177e4
LT
1835 if (!psf)
1836 return -ENOBUFS;
1da177e4
LT
1837 psf->sf_inaddr = *psfsrc;
1838 if (psf_prev) {
1839 psf_prev->sf_next = psf;
1840 } else
1841 pmc->sources = psf;
1842 }
1843 psf->sf_count[sfmode]++;
1844 if (psf->sf_count[sfmode] == 1) {
1845 ip_rt_multicast_event(pmc->interface);
1846 }
1847 return 0;
1848}
1849
1850#ifdef CONFIG_IP_MULTICAST
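/* sf_markstate() snapshots, for every source of pmc, whether it is
 * currently "active" (sf_oldin) under the group's filter mode.
 * sf_setstate() is called after a change, compares against that snapshot,
 * updates the retransmission counters and the tomb list accordingly, and
 * returns the number of sources whose active state changed.
 */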
1851static void sf_markstate(struct ip_mc_list *pmc)
1852{
1853 struct ip_sf_list *psf;
1854 int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
1855
c71151f0 1856 for (psf = pmc->sources; psf; psf = psf->sf_next)
1da177e4
LT
1857 if (pmc->sfcount[MCAST_EXCLUDE]) {
1858 psf->sf_oldin = mca_xcount ==
1859 psf->sf_count[MCAST_EXCLUDE] &&
1860 !psf->sf_count[MCAST_INCLUDE];
1861 } else
1862 psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
1863}
1864
1865static int sf_setstate(struct ip_mc_list *pmc)
1866{
ad12583f 1867 struct ip_sf_list *psf, *dpsf;
1da177e4
LT
1868 int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
1869 int qrv = pmc->interface->mr_qrv;
1870 int new_in, rv;
1871
1872 rv = 0;
c71151f0 1873 for (psf = pmc->sources; psf; psf = psf->sf_next) {
1da177e4
LT
1874 if (pmc->sfcount[MCAST_EXCLUDE]) {
1875 new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
1876 !psf->sf_count[MCAST_INCLUDE];
1877 } else
1878 new_in = psf->sf_count[MCAST_INCLUDE] != 0;
ad12583f
DS
1879 if (new_in) {
1880 if (!psf->sf_oldin) {
76edc605 1881 struct ip_sf_list *prev = NULL;
ad12583f 1882
c71151f0 1883 for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next) {
ad12583f
DS
1884 if (dpsf->sf_inaddr == psf->sf_inaddr)
1885 break;
1886 prev = dpsf;
1887 }
1888 if (dpsf) {
1889 if (prev)
1890 prev->sf_next = dpsf->sf_next;
1891 else
1892 pmc->tomb = dpsf->sf_next;
1893 kfree(dpsf);
1894 }
1895 psf->sf_crcount = qrv;
1896 rv++;
1897 }
1898 } else if (psf->sf_oldin) {
1899
1900 psf->sf_crcount = 0;
1901 /*
1902 * add or update "delete" records if an active filter
1903 * is now inactive
1904 */
c71151f0 1905 for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next)
ad12583f
DS
1906 if (dpsf->sf_inaddr == psf->sf_inaddr)
1907 break;
1908 if (!dpsf) {
3ed37a6f 1909 dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
ad12583f
DS
1910 if (!dpsf)
1911 continue;
1912 *dpsf = *psf;
1913 /* pmc->lock held by callers */
1914 dpsf->sf_next = pmc->tomb;
1915 pmc->tomb = dpsf;
1916 }
1917 dpsf->sf_crcount = qrv;
1da177e4
LT
1918 rv++;
1919 }
1920 }
1921 return rv;
1922}
1923#endif
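/*
 * Annotation (not part of the original source): sf_markstate() records, per
 * source, whether it was "in" the filter before a change (psf->sf_oldin), and
 * sf_setstate() then compares that snapshot with the new counts.  Sources
 * that just became active get a fresh retransmission count (qrv) and any
 * stale tomb entry is dropped; sources that just became inactive are copied
 * onto pmc->tomb so the corresponding "old source" records can still be
 * carried in the remaining IGMPv3 state-change reports.
 */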
1924
1925/*
1926 * Add multicast source filter list to the interface list
1927 */
8f935bbd
AV
1928static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
1929 int sfcount, __be32 *psfsrc, int delta)
1da177e4
LT
1930{
1931 struct ip_mc_list *pmc;
1932 int isexclude;
1933 int i, err;
1934
1935 if (!in_dev)
1936 return -ENODEV;
1d7138de
ED
1937 rcu_read_lock();
1938 for_each_pmc_rcu(in_dev, pmc) {
1da177e4
LT
1939 if (*pmca == pmc->multiaddr)
1940 break;
1941 }
1942 if (!pmc) {
1943 /* MCA not found?? bug */
1d7138de 1944 rcu_read_unlock();
1da177e4
LT
1945 return -ESRCH;
1946 }
1947 spin_lock_bh(&pmc->lock);
1d7138de 1948 rcu_read_unlock();
1da177e4
LT
1949
1950#ifdef CONFIG_IP_MULTICAST
1951 sf_markstate(pmc);
1952#endif
1953 isexclude = pmc->sfmode == MCAST_EXCLUDE;
1954 if (!delta)
1955 pmc->sfcount[sfmode]++;
1956 err = 0;
c71151f0 1957 for (i = 0; i < sfcount; i++) {
5eb81e89 1958 err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
1da177e4
LT
1959 if (err)
1960 break;
1961 }
1962 if (err) {
1963 int j;
1964
685f94e6
JZ
1965 if (!delta)
1966 pmc->sfcount[sfmode]--;
c71151f0 1967 for (j = 0; j < i; j++)
a1889c0d 1968 (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
1da177e4
LT
1969 } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
1970#ifdef CONFIG_IP_MULTICAST
1da177e4 1971 struct ip_sf_list *psf;
cfcabdcc 1972 in_dev = pmc->interface;
1da177e4
LT
1973#endif
1974
1975 /* filter mode change */
1976 if (pmc->sfcount[MCAST_EXCLUDE])
1977 pmc->sfmode = MCAST_EXCLUDE;
1978 else if (pmc->sfcount[MCAST_INCLUDE])
1979 pmc->sfmode = MCAST_INCLUDE;
1980#ifdef CONFIG_IP_MULTICAST
1981 /* else no filters; keep old mode for reports */
1982
a9fe8e29 1983 pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
1da177e4 1984 in_dev->mr_ifc_count = pmc->crcount;
c71151f0 1985 for (psf = pmc->sources; psf; psf = psf->sf_next)
1da177e4
LT
1986 psf->sf_crcount = 0;
1987 igmp_ifc_event(in_dev);
1988 } else if (sf_setstate(pmc)) {
1989 igmp_ifc_event(in_dev);
1990#endif
1991 }
1992 spin_unlock_bh(&pmc->lock);
1993 return err;
1994}
1995
1996static void ip_mc_clear_src(struct ip_mc_list *pmc)
1997{
1998 struct ip_sf_list *psf, *nextpsf;
1999
c71151f0 2000 for (psf = pmc->tomb; psf; psf = nextpsf) {
1da177e4
LT
2001 nextpsf = psf->sf_next;
2002 kfree(psf);
2003 }
2004 pmc->tomb = NULL;
c71151f0 2005 for (psf = pmc->sources; psf; psf = nextpsf) {
1da177e4
LT
2006 nextpsf = psf->sf_next;
2007 kfree(psf);
2008 }
2009 pmc->sources = NULL;
2010 pmc->sfmode = MCAST_EXCLUDE;
de9daad9 2011 pmc->sfcount[MCAST_INCLUDE] = 0;
1da177e4
LT
2012 pmc->sfcount[MCAST_EXCLUDE] = 1;
2013}
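/*
 * Annotation (not part of the original source): ip_mc_clear_src() frees both
 * the live source list and the tomb list and resets the group to its default
 * interface-level state, i.e. EXCLUDE mode with an empty source list.
 */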
2014
54ff9ef3
MRL
2015/* Join a multicast group
2016 */
2017
2018int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
1da177e4 2019{
8f935bbd 2020 __be32 addr = imr->imr_multiaddr.s_addr;
959d10f6 2021 struct ip_mc_socklist *iml, *i;
1da177e4
LT
2022 struct in_device *in_dev;
2023 struct inet_sock *inet = inet_sk(sk);
877acedc 2024 struct net *net = sock_net(sk);
ca9b907d 2025 int ifindex;
1da177e4 2026 int count = 0;
959d10f6
ED
2027 int err;
2028
2029 ASSERT_RTNL();
1da177e4 2030
f97c1e0c 2031 if (!ipv4_is_multicast(addr))
1da177e4
LT
2032 return -EINVAL;
2033
877acedc 2034 in_dev = ip_mc_find_dev(net, imr);
1da177e4
LT
2035
2036 if (!in_dev) {
1da177e4
LT
2037 err = -ENODEV;
2038 goto done;
2039 }
2040
1da177e4 2041 err = -EADDRINUSE;
ca9b907d 2042 ifindex = imr->imr_ifindex;
1d7138de 2043 for_each_pmc_rtnl(inet, i) {
ca9b907d
DS
2044 if (i->multi.imr_multiaddr.s_addr == addr &&
2045 i->multi.imr_ifindex == ifindex)
1da177e4 2046 goto done;
1da177e4
LT
2047 count++;
2048 }
2049 err = -ENOBUFS;
ca9b907d
DS
2050 if (count >= sysctl_igmp_max_memberships)
2051 goto done;
a7e9ff73 2052 iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
51456b29 2053 if (!iml)
1da177e4 2054 goto done;
ca9b907d 2055
1da177e4 2056 memcpy(&iml->multi, imr, sizeof(*imr));
1d7138de 2057 iml->next_rcu = inet->mc_list;
1da177e4
LT
2058 iml->sflist = NULL;
2059 iml->sfmode = MCAST_EXCLUDE;
cf778b00 2060 rcu_assign_pointer(inet->mc_list, iml);
1da177e4 2061 ip_mc_inc_group(in_dev, addr);
1da177e4 2062 err = 0;
1da177e4 2063done:
1da177e4
LT
2064 return err;
2065}
4bc2f18b 2066EXPORT_SYMBOL(ip_mc_join_group);
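/*
 * Annotation (not part of the original source): ip_mc_join_group() is what
 * ultimately services a userspace setsockopt(IP_ADD_MEMBERSHIP).  The sketch
 * below shows that caller side; it is guarded by "#if 0" so it is never built
 * here, and the group address 239.1.2.3 is purely hypothetical.
 */
#if 0	/* userspace illustration only */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ip_mreqn mreq;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&mreq, 0, sizeof(mreq));	/* imr_address/imr_ifindex = 0: let the kernel choose */
	inet_pton(AF_INET, "239.1.2.3", &mreq.imr_multiaddr);
	if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)))
		perror("IP_ADD_MEMBERSHIP");
	/* ... receive multicast traffic here ... */
	setsockopt(fd, IPPROTO_IP, IP_DROP_MEMBERSHIP, &mreq, sizeof(mreq));
	close(fd);
	return 0;
}
#endif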
1da177e4
LT
2067
2068static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
2069 struct in_device *in_dev)
2070{
1d7138de 2071 struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
1da177e4
LT
2072 int err;
2073
51456b29 2074 if (!psf) {
1da177e4
LT
2075 /* any-source empty exclude case */
2076 return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
2077 iml->sfmode, 0, NULL, 0);
2078 }
2079 err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
c85bb41e 2080 iml->sfmode, psf->sl_count, psf->sl_addr, 0);
a9b3cd7f 2081 RCU_INIT_POINTER(iml->sflist, NULL);
c85bb41e
FL
2082 /* decrease mem now to avoid the memleak warning */
2083 atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
7519cce4 2084 kfree_rcu(psf, rcu);
1da177e4
LT
2085 return err;
2086}
2087
54ff9ef3 2088int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1da177e4
LT
2089{
2090 struct inet_sock *inet = inet_sk(sk);
1d7138de
ED
2091 struct ip_mc_socklist *iml;
2092 struct ip_mc_socklist __rcu **imlp;
84b42bae 2093 struct in_device *in_dev;
877acedc 2094 struct net *net = sock_net(sk);
8f935bbd 2095 __be32 group = imr->imr_multiaddr.s_addr;
84b42bae 2096 u32 ifindex;
acd6e00b 2097 int ret = -EADDRNOTAVAIL;
1da177e4 2098
959d10f6
ED
2099 ASSERT_RTNL();
2100
877acedc 2101 in_dev = ip_mc_find_dev(net, imr);
52ad353a 2102 if (!in_dev) {
2103 ret = -ENODEV;
2104 goto out;
2105 }
84b42bae 2106 ifindex = imr->imr_ifindex;
1d7138de
ED
2107 for (imlp = &inet->mc_list;
2108 (iml = rtnl_dereference(*imlp)) != NULL;
2109 imlp = &iml->next_rcu) {
acd6e00b
DS
2110 if (iml->multi.imr_multiaddr.s_addr != group)
2111 continue;
2112 if (ifindex) {
2113 if (iml->multi.imr_ifindex != ifindex)
2114 continue;
2115 } else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
2116 iml->multi.imr_address.s_addr)
2117 continue;
1da177e4 2118
acd6e00b 2119 (void) ip_mc_leave_src(sk, iml, in_dev);
1da177e4 2120
1d7138de 2121 *imlp = iml->next_rcu;
acd6e00b 2122
52ad353a 2123 ip_mc_dec_group(in_dev, group);
959d10f6 2124
c85bb41e
FL
2125 /* decrease mem now to avoid the memleak warning */
2126 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
10d50e74 2127 kfree_rcu(iml, rcu);
acd6e00b 2128 return 0;
1da177e4 2129 }
52ad353a 2130out:
959d10f6
ED
2131 return ret;
2132}
193ba924 2133EXPORT_SYMBOL(ip_mc_leave_group);
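/*
 * Annotation (not part of the original source): when leaving, a membership is
 * matched first by group address, then by imr_ifindex if one was supplied,
 * otherwise by the interface address in imr_address.  Only the first matching
 * entry on the socket's list is removed.
 */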
1da177e4
LT
2134
2135int ip_mc_source(int add, int omode, struct sock *sk, struct
2136 ip_mreq_source *mreqs, int ifindex)
2137{
2138 int err;
2139 struct ip_mreqn imr;
63007727 2140 __be32 addr = mreqs->imr_multiaddr;
1da177e4
LT
2141 struct ip_mc_socklist *pmc;
2142 struct in_device *in_dev = NULL;
2143 struct inet_sock *inet = inet_sk(sk);
2144 struct ip_sf_socklist *psl;
877acedc 2145 struct net *net = sock_net(sk);
8cdaaa15 2146 int leavegroup = 0;
1da177e4
LT
2147 int i, j, rv;
2148
f97c1e0c 2149 if (!ipv4_is_multicast(addr))
1da177e4
LT
2150 return -EINVAL;
2151
54ff9ef3 2152 ASSERT_RTNL();
1da177e4
LT
2153
2154 imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
2155 imr.imr_address.s_addr = mreqs->imr_interface;
2156 imr.imr_ifindex = ifindex;
877acedc 2157 in_dev = ip_mc_find_dev(net, &imr);
1da177e4
LT
2158
2159 if (!in_dev) {
2160 err = -ENODEV;
2161 goto done;
2162 }
2163 err = -EADDRNOTAVAIL;
2164
1d7138de 2165 for_each_pmc_rtnl(inet, pmc) {
f64f9e71
JP
2166 if ((pmc->multi.imr_multiaddr.s_addr ==
2167 imr.imr_multiaddr.s_addr) &&
2168 (pmc->multi.imr_ifindex == imr.imr_ifindex))
1da177e4
LT
2169 break;
2170 }
917f2f10
DS
2171 if (!pmc) { /* must have a prior join */
2172 err = -EINVAL;
1da177e4 2173 goto done;
917f2f10 2174 }
1da177e4
LT
2175 /* if a source filter was set, must be the same mode as before */
2176 if (pmc->sflist) {
917f2f10
DS
2177 if (pmc->sfmode != omode) {
2178 err = -EINVAL;
1da177e4 2179 goto done;
917f2f10 2180 }
1da177e4
LT
2181 } else if (pmc->sfmode != omode) {
2182 /* allow mode switches for empty-set filters */
2183 ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
e905a9ed 2184 ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
1da177e4
LT
2185 NULL, 0);
2186 pmc->sfmode = omode;
2187 }
2188
1d7138de 2189 psl = rtnl_dereference(pmc->sflist);
1da177e4
LT
2190 if (!add) {
2191 if (!psl)
917f2f10 2192 goto done; /* err = -EADDRNOTAVAIL */
1da177e4 2193 rv = !0;
c71151f0 2194 for (i = 0; i < psl->sl_count; i++) {
1da177e4 2195 rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
63007727 2196 sizeof(__be32));
1da177e4
LT
2197 if (rv == 0)
2198 break;
2199 }
2200 if (rv) /* source not found */
917f2f10 2201 goto done; /* err = -EADDRNOTAVAIL */
1da177e4 2202
8cdaaa15
DS
2203 /* special case - (INCLUDE, empty) == LEAVE_GROUP */
2204 if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
2205 leavegroup = 1;
2206 goto done;
2207 }
2208
1da177e4 2209 /* update the interface filter */
e905a9ed 2210 ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
1da177e4
LT
2211 &mreqs->imr_sourceaddr, 1);
2212
c71151f0 2213 for (j = i+1; j < psl->sl_count; j++)
1da177e4
LT
2214 psl->sl_addr[j-1] = psl->sl_addr[j];
2215 psl->sl_count--;
2216 err = 0;
2217 goto done;
2218 }
2219 /* else, add a new source to the filter */
2220
2221 if (psl && psl->sl_count >= sysctl_igmp_max_msf) {
2222 err = -ENOBUFS;
2223 goto done;
2224 }
2225 if (!psl || psl->sl_count == psl->sl_max) {
2226 struct ip_sf_socklist *newpsl;
2227 int count = IP_SFBLOCK;
2228
2229 if (psl)
2230 count += psl->sl_max;
8b3a7005 2231 newpsl = sock_kmalloc(sk, IP_SFLSIZE(count), GFP_KERNEL);
1da177e4
LT
2232 if (!newpsl) {
2233 err = -ENOBUFS;
2234 goto done;
2235 }
2236 newpsl->sl_max = count;
2237 newpsl->sl_count = count - IP_SFBLOCK;
2238 if (psl) {
c71151f0 2239 for (i = 0; i < psl->sl_count; i++)
1da177e4 2240 newpsl->sl_addr[i] = psl->sl_addr[i];
c85bb41e
FL
2241 /* decrease mem now to avoid the memleak warning */
2242 atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
7519cce4 2243 kfree_rcu(psl, rcu);
1da177e4 2244 }
cf778b00 2245 rcu_assign_pointer(pmc->sflist, newpsl);
c85bb41e 2246 psl = newpsl;
1da177e4
LT
2247 }
2248 rv = 1; /* > 0 for insert logic below if sl_count is 0 */
c71151f0 2249 for (i = 0; i < psl->sl_count; i++) {
1da177e4 2250 rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
63007727 2251 sizeof(__be32));
1da177e4
LT
2252 if (rv == 0)
2253 break;
2254 }
2255 if (rv == 0) /* address already there is an error */
2256 goto done;
c71151f0 2257 for (j = psl->sl_count-1; j >= i; j--)
1da177e4
LT
2258 psl->sl_addr[j+1] = psl->sl_addr[j];
2259 psl->sl_addr[i] = mreqs->imr_sourceaddr;
2260 psl->sl_count++;
2261 err = 0;
2262 /* update the interface list */
e905a9ed 2263 ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
1da177e4
LT
2264 &mreqs->imr_sourceaddr, 1);
2265done:
8cdaaa15 2266 if (leavegroup)
54ff9ef3 2267 err = ip_mc_leave_group(sk, &imr);
1da177e4
LT
2268 return err;
2269}
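/*
 * Annotation (not part of the original source): ip_mc_source() backs the
 * per-source socket options (IP_ADD_SOURCE_MEMBERSHIP, IP_DROP_SOURCE_MEMBERSHIP,
 * IP_BLOCK_SOURCE, IP_UNBLOCK_SOURCE and their MCAST_* equivalents).  A
 * minimal userspace sketch follows, assuming the usual libc definition of
 * struct ip_mreq_source; the group and source addresses are hypothetical.
 */
#if 0	/* userspace illustration only */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Join 239.1.2.3 in INCLUDE mode, accepting traffic only from 192.0.2.1. */
static int join_source_group(int fd)
{
	struct ip_mreq_source mreqs;

	memset(&mreqs, 0, sizeof(mreqs));	/* imr_interface left as INADDR_ANY */
	inet_pton(AF_INET, "239.1.2.3", &mreqs.imr_multiaddr);
	inet_pton(AF_INET, "192.0.2.1", &mreqs.imr_sourceaddr);
	return setsockopt(fd, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP,
			  &mreqs, sizeof(mreqs));
}
#endif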
2270
2271int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2272{
9951f036 2273 int err = 0;
1da177e4 2274 struct ip_mreqn imr;
63007727 2275 __be32 addr = msf->imsf_multiaddr;
1da177e4
LT
2276 struct ip_mc_socklist *pmc;
2277 struct in_device *in_dev;
2278 struct inet_sock *inet = inet_sk(sk);
2279 struct ip_sf_socklist *newpsl, *psl;
877acedc 2280 struct net *net = sock_net(sk);
9951f036 2281 int leavegroup = 0;
1da177e4 2282
f97c1e0c 2283 if (!ipv4_is_multicast(addr))
1da177e4
LT
2284 return -EINVAL;
2285 if (msf->imsf_fmode != MCAST_INCLUDE &&
2286 msf->imsf_fmode != MCAST_EXCLUDE)
2287 return -EINVAL;
2288
54ff9ef3 2289 ASSERT_RTNL();
1da177e4
LT
2290
2291 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
2292 imr.imr_address.s_addr = msf->imsf_interface;
2293 imr.imr_ifindex = ifindex;
877acedc 2294 in_dev = ip_mc_find_dev(net, &imr);
1da177e4
LT
2295
2296 if (!in_dev) {
2297 err = -ENODEV;
2298 goto done;
2299 }
1da177e4 2300
9951f036
DS
2301 /* special case - (INCLUDE, empty) == LEAVE_GROUP */
2302 if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) {
2303 leavegroup = 1;
2304 goto done;
2305 }
2306
1d7138de 2307 for_each_pmc_rtnl(inet, pmc) {
1da177e4
LT
2308 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
2309 pmc->multi.imr_ifindex == imr.imr_ifindex)
2310 break;
2311 }
917f2f10
DS
2312 if (!pmc) { /* must have a prior join */
2313 err = -EINVAL;
1da177e4 2314 goto done;
917f2f10 2315 }
1da177e4 2316 if (msf->imsf_numsrc) {
8b3a7005
KK
2317 newpsl = sock_kmalloc(sk, IP_SFLSIZE(msf->imsf_numsrc),
2318 GFP_KERNEL);
1da177e4
LT
2319 if (!newpsl) {
2320 err = -ENOBUFS;
2321 goto done;
2322 }
2323 newpsl->sl_max = newpsl->sl_count = msf->imsf_numsrc;
2324 memcpy(newpsl->sl_addr, msf->imsf_slist,
2325 msf->imsf_numsrc * sizeof(msf->imsf_slist[0]));
2326 err = ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
2327 msf->imsf_fmode, newpsl->sl_count, newpsl->sl_addr, 0);
2328 if (err) {
2329 sock_kfree_s(sk, newpsl, IP_SFLSIZE(newpsl->sl_max));
2330 goto done;
2331 }
8713dbf0 2332 } else {
1da177e4 2333 newpsl = NULL;
8713dbf0
YZ
2334 (void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
2335 msf->imsf_fmode, 0, NULL, 0);
2336 }
1d7138de 2337 psl = rtnl_dereference(pmc->sflist);
1da177e4
LT
2338 if (psl) {
2339 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2340 psl->sl_count, psl->sl_addr, 0);
c85bb41e
FL
2341 /* decrease mem now to avoid the memleak warning */
2342 atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
7519cce4 2343 kfree_rcu(psl, rcu);
1da177e4
LT
2344 } else
2345 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2346 0, NULL, 0);
cf778b00 2347 rcu_assign_pointer(pmc->sflist, newpsl);
1da177e4 2348 pmc->sfmode = msf->imsf_fmode;
917f2f10 2349 err = 0;
1da177e4 2350done:
9951f036
DS
2351 if (leavegroup)
2352 err = ip_mc_leave_group(sk, &imr);
1da177e4
LT
2353 return err;
2354}
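/*
 * Annotation (not part of the original source): ip_mc_msfilter() is the
 * full-state counterpart of ip_mc_source(); setsockopt(IP_MSFILTER) replaces
 * the socket's whole source list and filter mode in one call.  An INCLUDE
 * filter with no sources is treated as leaving the group altogether.
 */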
2355
2356int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
2357 struct ip_msfilter __user *optval, int __user *optlen)
2358{
2359 int err, len, count, copycount;
2360 struct ip_mreqn imr;
63007727 2361 __be32 addr = msf->imsf_multiaddr;
1da177e4
LT
2362 struct ip_mc_socklist *pmc;
2363 struct in_device *in_dev;
2364 struct inet_sock *inet = inet_sk(sk);
2365 struct ip_sf_socklist *psl;
877acedc 2366 struct net *net = sock_net(sk);
1da177e4 2367
f97c1e0c 2368 if (!ipv4_is_multicast(addr))
1da177e4
LT
2369 return -EINVAL;
2370
6756ae4b 2371 rtnl_lock();
1da177e4
LT
2372
2373 imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
2374 imr.imr_address.s_addr = msf->imsf_interface;
2375 imr.imr_ifindex = 0;
877acedc 2376 in_dev = ip_mc_find_dev(net, &imr);
1da177e4
LT
2377
2378 if (!in_dev) {
2379 err = -ENODEV;
2380 goto done;
2381 }
2382 err = -EADDRNOTAVAIL;
2383
1d7138de 2384 for_each_pmc_rtnl(inet, pmc) {
1da177e4
LT
2385 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
2386 pmc->multi.imr_ifindex == imr.imr_ifindex)
2387 break;
2388 }
2389 if (!pmc) /* must have a prior join */
2390 goto done;
2391 msf->imsf_fmode = pmc->sfmode;
1d7138de 2392 psl = rtnl_dereference(pmc->sflist);
6756ae4b 2393 rtnl_unlock();
1da177e4
LT
2394 if (!psl) {
2395 len = 0;
2396 count = 0;
2397 } else {
2398 count = psl->sl_count;
2399 }
2400 copycount = count < msf->imsf_numsrc ? count : msf->imsf_numsrc;
2401 len = copycount * sizeof(psl->sl_addr[0]);
2402 msf->imsf_numsrc = count;
2403 if (put_user(IP_MSFILTER_SIZE(copycount), optlen) ||
2404 copy_to_user(optval, msf, IP_MSFILTER_SIZE(0))) {
2405 return -EFAULT;
2406 }
2407 if (len &&
2408 copy_to_user(&optval->imsf_slist[0], psl->sl_addr, len))
2409 return -EFAULT;
2410 return 0;
2411done:
6756ae4b 2412 rtnl_unlock();
1da177e4
LT
2413 return err;
2414}
2415
2416int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
2417 struct group_filter __user *optval, int __user *optlen)
2418{
2419 int err, i, count, copycount;
2420 struct sockaddr_in *psin;
63007727 2421 __be32 addr;
1da177e4
LT
2422 struct ip_mc_socklist *pmc;
2423 struct inet_sock *inet = inet_sk(sk);
2424 struct ip_sf_socklist *psl;
2425
2426 psin = (struct sockaddr_in *)&gsf->gf_group;
2427 if (psin->sin_family != AF_INET)
2428 return -EINVAL;
2429 addr = psin->sin_addr.s_addr;
f97c1e0c 2430 if (!ipv4_is_multicast(addr))
1da177e4
LT
2431 return -EINVAL;
2432
6756ae4b 2433 rtnl_lock();
1da177e4
LT
2434
2435 err = -EADDRNOTAVAIL;
2436
1d7138de 2437 for_each_pmc_rtnl(inet, pmc) {
1da177e4
LT
2438 if (pmc->multi.imr_multiaddr.s_addr == addr &&
2439 pmc->multi.imr_ifindex == gsf->gf_interface)
2440 break;
2441 }
2442 if (!pmc) /* must have a prior join */
2443 goto done;
2444 gsf->gf_fmode = pmc->sfmode;
1d7138de 2445 psl = rtnl_dereference(pmc->sflist);
6756ae4b 2446 rtnl_unlock();
1da177e4
LT
2447 count = psl ? psl->sl_count : 0;
2448 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
2449 gsf->gf_numsrc = count;
2450 if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
2451 copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
2452 return -EFAULT;
2453 }
c71151f0 2454 for (i = 0; i < copycount; i++) {
1da177e4
LT
2455 struct sockaddr_storage ss;
2456
2457 psin = (struct sockaddr_in *)&ss;
2458 memset(&ss, 0, sizeof(ss));
2459 psin->sin_family = AF_INET;
2460 psin->sin_addr.s_addr = psl->sl_addr[i];
2461 if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
2462 return -EFAULT;
2463 }
2464 return 0;
2465done:
6756ae4b 2466 rtnl_unlock();
1da177e4
LT
2467 return err;
2468}
2469
2470/*
2471 * check if a multicast source filter allows delivery for a given <src,dst,intf>
2472 */
c0cda068 2473int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
1da177e4
LT
2474{
2475 struct inet_sock *inet = inet_sk(sk);
2476 struct ip_mc_socklist *pmc;
2477 struct ip_sf_socklist *psl;
2478 int i;
c85bb41e 2479 int ret;
1da177e4 2480
c85bb41e 2481 ret = 1;
f97c1e0c 2482 if (!ipv4_is_multicast(loc_addr))
c85bb41e 2483 goto out;
1da177e4 2484
c85bb41e 2485 rcu_read_lock();
1d7138de 2486 for_each_pmc_rcu(inet, pmc) {
1da177e4
LT
2487 if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
2488 pmc->multi.imr_ifindex == dif)
2489 break;
2490 }
c85bb41e 2491 ret = inet->mc_all;
1da177e4 2492 if (!pmc)
c85bb41e 2493 goto unlock;
1d7138de 2494 psl = rcu_dereference(pmc->sflist);
c85bb41e 2495 ret = (pmc->sfmode == MCAST_EXCLUDE);
1da177e4 2496 if (!psl)
c85bb41e 2497 goto unlock;
1da177e4 2498
c71151f0 2499 for (i = 0; i < psl->sl_count; i++) {
1da177e4
LT
2500 if (psl->sl_addr[i] == rmt_addr)
2501 break;
2502 }
c85bb41e 2503 ret = 0;
1da177e4 2504 if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
c85bb41e 2505 goto unlock;
1da177e4 2506 if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
c85bb41e
FL
2507 goto unlock;
2508 ret = 1;
2509unlock:
2510 rcu_read_unlock();
2511out:
2512 return ret;
1da177e4
LT
2513}
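/*
 * Annotation (not part of the original source): the decision above is
 * per-socket.  With no matching membership the verdict falls back to
 * inet->mc_all (the IP_MULTICAST_ALL socket option); in INCLUDE mode only
 * listed sources are accepted, in EXCLUDE mode everything except the listed
 * sources is accepted.
 */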
2514
2515/*
2516 * A socket is closing.
2517 */
2518
2519void ip_mc_drop_socket(struct sock *sk)
2520{
2521 struct inet_sock *inet = inet_sk(sk);
2522 struct ip_mc_socklist *iml;
877acedc 2523 struct net *net = sock_net(sk);
1da177e4 2524
51456b29 2525 if (!inet->mc_list)
1da177e4
LT
2526 return;
2527
2528 rtnl_lock();
1d7138de 2529 while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
1da177e4 2530 struct in_device *in_dev;
1da177e4 2531
1d7138de 2532 inet->mc_list = iml->next_rcu;
877acedc 2533 in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
bb699cbc 2534 (void) ip_mc_leave_src(sk, iml, in_dev);
00db4124 2535 if (in_dev)
1da177e4 2536 ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
c85bb41e
FL
2537 /* decrease mem now to avoid the memleak warning */
2538 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
10d50e74 2539 kfree_rcu(iml, rcu);
1da177e4
LT
2540 }
2541 rtnl_unlock();
2542}
2543
dbdd9a52
DM
2544/* called with rcu_read_lock() */
2545int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto)
1da177e4
LT
2546{
2547 struct ip_mc_list *im;
e9897071 2548 struct ip_mc_list __rcu **mc_hash;
1da177e4
LT
2549 struct ip_sf_list *psf;
2550 int rv = 0;
2551
e9897071
ED
2552 mc_hash = rcu_dereference(in_dev->mc_hash);
2553 if (mc_hash) {
c70eba74 2554 u32 hash = hash_32((__force u32)mc_addr, MC_HASH_SZ_LOG);
e9897071
ED
2555
2556 for (im = rcu_dereference(mc_hash[hash]);
2557 im != NULL;
2558 im = rcu_dereference(im->next_hash)) {
2559 if (im->multiaddr == mc_addr)
2560 break;
2561 }
2562 } else {
2563 for_each_pmc_rcu(in_dev, im) {
2564 if (im->multiaddr == mc_addr)
2565 break;
2566 }
1da177e4
LT
2567 }
2568 if (im && proto == IPPROTO_IGMP) {
2569 rv = 1;
2570 } else if (im) {
2571 if (src_addr) {
c71151f0 2572 for (psf = im->sources; psf; psf = psf->sf_next) {
1da177e4
LT
2573 if (psf->sf_inaddr == src_addr)
2574 break;
2575 }
2576 if (psf)
2577 rv = psf->sf_count[MCAST_INCLUDE] ||
2578 psf->sf_count[MCAST_EXCLUDE] !=
2579 im->sfcount[MCAST_EXCLUDE];
2580 else
2581 rv = im->sfcount[MCAST_EXCLUDE] != 0;
2582 } else
2583 rv = 1; /* unspecified source; tentatively allow */
2584 }
1da177e4
LT
2585 return rv;
2586}
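/*
 * Annotation (not part of the original source): ip_check_mc_rcu() is the
 * device-level membership test used on the receive path.  It prefers the
 * mc_hash lookup when the hash has been built, falls back to the linear list
 * otherwise, and for non-IGMP payloads also honours the interface source
 * filters maintained by ip_mc_add_src()/ip_mc_del_src() above.
 */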
2587
2588#if defined(CONFIG_PROC_FS)
2589struct igmp_mc_iter_state {
7091e728 2590 struct seq_net_private p;
1da177e4
LT
2591 struct net_device *dev;
2592 struct in_device *in_dev;
2593};
2594
2595#define igmp_mc_seq_private(seq) ((struct igmp_mc_iter_state *)(seq)->private)
2596
2597static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
2598{
7091e728 2599 struct net *net = seq_file_net(seq);
1da177e4
LT
2600 struct ip_mc_list *im = NULL;
2601 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2602
7562f876 2603 state->in_dev = NULL;
61fbab77 2604 for_each_netdev_rcu(net, state->dev) {
1da177e4 2605 struct in_device *in_dev;
6baff150
ED
2606
2607 in_dev = __in_dev_get_rcu(state->dev);
1da177e4
LT
2608 if (!in_dev)
2609 continue;
1d7138de 2610 im = rcu_dereference(in_dev->mc_list);
1da177e4
LT
2611 if (im) {
2612 state->in_dev = in_dev;
2613 break;
2614 }
1da177e4
LT
2615 }
2616 return im;
2617}
2618
2619static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
2620{
2621 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
6baff150 2622
1d7138de
ED
2623 im = rcu_dereference(im->next_rcu);
2624 while (!im) {
6baff150 2625 state->dev = next_net_device_rcu(state->dev);
1da177e4
LT
2626 if (!state->dev) {
2627 state->in_dev = NULL;
2628 break;
2629 }
6baff150 2630 state->in_dev = __in_dev_get_rcu(state->dev);
1da177e4
LT
2631 if (!state->in_dev)
2632 continue;
1d7138de 2633 im = rcu_dereference(state->in_dev->mc_list);
1da177e4
LT
2634 }
2635 return im;
2636}
2637
2638static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
2639{
2640 struct ip_mc_list *im = igmp_mc_get_first(seq);
2641 if (im)
2642 while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
2643 --pos;
2644 return pos ? NULL : im;
2645}
2646
2647static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
61fbab77 2648 __acquires(rcu)
1da177e4 2649{
61fbab77 2650 rcu_read_lock();
1da177e4
LT
2651 return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2652}
2653
2654static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2655{
2656 struct ip_mc_list *im;
2657 if (v == SEQ_START_TOKEN)
2658 im = igmp_mc_get_first(seq);
2659 else
2660 im = igmp_mc_get_next(seq, v);
2661 ++*pos;
2662 return im;
2663}
2664
2665static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
61fbab77 2666 __releases(rcu)
1da177e4
LT
2667{
2668 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
1d7138de
ED
2669
2670 state->in_dev = NULL;
1da177e4 2671 state->dev = NULL;
61fbab77 2672 rcu_read_unlock();
1da177e4
LT
2673}
2674
2675static int igmp_mc_seq_show(struct seq_file *seq, void *v)
2676{
2677 if (v == SEQ_START_TOKEN)
e905a9ed 2678 seq_puts(seq,
1da177e4
LT
2679 "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
2680 else {
2681 struct ip_mc_list *im = (struct ip_mc_list *)v;
2682 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2683 char *querier;
a399a805
ED
2684 long delta;
2685
1da177e4
LT
2686#ifdef CONFIG_IP_MULTICAST
2687 querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
2688 IGMP_V2_SEEN(state->in_dev) ? "V2" :
2689 "V3";
2690#else
2691 querier = "NONE";
2692#endif
2693
e6b68883 2694 if (rcu_access_pointer(state->in_dev->mc_list) == im) {
1da177e4 2695 seq_printf(seq, "%d\t%-10s: %5d %7s\n",
b8bae41e 2696 state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
1da177e4
LT
2697 }
2698
a399a805 2699 delta = im->timer.expires - jiffies;
1da177e4 2700 seq_printf(seq,
338fcf98 2701 "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
1da177e4 2702 im->multiaddr, im->users,
a399a805
ED
2703 im->tm_running,
2704 im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
1da177e4
LT
2705 im->reporter);
2706 }
2707 return 0;
2708}
2709
f690808e 2710static const struct seq_operations igmp_mc_seq_ops = {
1da177e4
LT
2711 .start = igmp_mc_seq_start,
2712 .next = igmp_mc_seq_next,
2713 .stop = igmp_mc_seq_stop,
2714 .show = igmp_mc_seq_show,
2715};
2716
2717static int igmp_mc_seq_open(struct inode *inode, struct file *file)
2718{
7091e728 2719 return seq_open_net(inode, file, &igmp_mc_seq_ops,
cf7732e4 2720 sizeof(struct igmp_mc_iter_state));
1da177e4
LT
2721}
2722
9a32144e 2723static const struct file_operations igmp_mc_seq_fops = {
1da177e4
LT
2724 .owner = THIS_MODULE,
2725 .open = igmp_mc_seq_open,
2726 .read = seq_read,
2727 .llseek = seq_lseek,
7091e728 2728 .release = seq_release_net,
1da177e4
LT
2729};
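/*
 * Annotation (not part of the original source): the seq_file operations above
 * back /proc/net/igmp.  A trivial userspace dump of that file, guarded so it
 * is never built here:
 */
#if 0	/* userspace illustration only */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/igmp", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one header line, then per-device/group rows */
	fclose(f);
	return 0;
}
#endif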
2730
2731struct igmp_mcf_iter_state {
7091e728 2732 struct seq_net_private p;
1da177e4
LT
2733 struct net_device *dev;
2734 struct in_device *idev;
2735 struct ip_mc_list *im;
2736};
2737
2738#define igmp_mcf_seq_private(seq) ((struct igmp_mcf_iter_state *)(seq)->private)
2739
2740static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
2741{
7091e728 2742 struct net *net = seq_file_net(seq);
1da177e4
LT
2743 struct ip_sf_list *psf = NULL;
2744 struct ip_mc_list *im = NULL;
2745 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
2746
7562f876
PE
2747 state->idev = NULL;
2748 state->im = NULL;
61fbab77 2749 for_each_netdev_rcu(net, state->dev) {
1da177e4 2750 struct in_device *idev;
6baff150 2751 idev = __in_dev_get_rcu(state->dev);
51456b29 2752 if (unlikely(!idev))
1da177e4 2753 continue;
1d7138de 2754 im = rcu_dereference(idev->mc_list);
00db4124 2755 if (likely(im)) {
1da177e4
LT
2756 spin_lock_bh(&im->lock);
2757 psf = im->sources;
00db4124 2758 if (likely(psf)) {
1da177e4
LT
2759 state->im = im;
2760 state->idev = idev;
2761 break;
2762 }
2763 spin_unlock_bh(&im->lock);
2764 }
1da177e4
LT
2765 }
2766 return psf;
2767}
2768
2769static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf)
2770{
2771 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
2772
2773 psf = psf->sf_next;
2774 while (!psf) {
2775 spin_unlock_bh(&state->im->lock);
2776 state->im = state->im->next;
2777 while (!state->im) {
6baff150 2778 state->dev = next_net_device_rcu(state->dev);
1da177e4
LT
2779 if (!state->dev) {
2780 state->idev = NULL;
2781 goto out;
2782 }
6baff150 2783 state->idev = __in_dev_get_rcu(state->dev);
1da177e4
LT
2784 if (!state->idev)
2785 continue;
1d7138de 2786 state->im = rcu_dereference(state->idev->mc_list);
1da177e4
LT
2787 }
2788 if (!state->im)
2789 break;
2790 spin_lock_bh(&state->im->lock);
2791 psf = state->im->sources;
2792 }
2793out:
2794 return psf;
2795}
2796
2797static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos)
2798{
2799 struct ip_sf_list *psf = igmp_mcf_get_first(seq);
2800 if (psf)
2801 while (pos && (psf = igmp_mcf_get_next(seq, psf)) != NULL)
2802 --pos;
2803 return pos ? NULL : psf;
2804}
2805
2806static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos)
61fbab77 2807 __acquires(rcu)
1da177e4 2808{
61fbab77 2809 rcu_read_lock();
1da177e4
LT
2810 return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2811}
2812
2813static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2814{
2815 struct ip_sf_list *psf;
2816 if (v == SEQ_START_TOKEN)
2817 psf = igmp_mcf_get_first(seq);
2818 else
2819 psf = igmp_mcf_get_next(seq, v);
2820 ++*pos;
2821 return psf;
2822}
2823
2824static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
61fbab77 2825 __releases(rcu)
1da177e4
LT
2826{
2827 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
00db4124 2828 if (likely(state->im)) {
1da177e4
LT
2829 spin_unlock_bh(&state->im->lock);
2830 state->im = NULL;
2831 }
1d7138de 2832 state->idev = NULL;
1da177e4 2833 state->dev = NULL;
61fbab77 2834 rcu_read_unlock();
1da177e4
LT
2835}
2836
2837static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
2838{
2839 struct ip_sf_list *psf = (struct ip_sf_list *)v;
2840 struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
2841
2842 if (v == SEQ_START_TOKEN) {
1744bea1 2843 seq_puts(seq, "Idx Device MCA SRC INC EXC\n");
1da177e4
LT
2844 } else {
2845 seq_printf(seq,
2846 "%3d %6.6s 0x%08x "
e905a9ed
YH
2847 "0x%08x %6lu %6lu\n",
2848 state->dev->ifindex, state->dev->name,
1da177e4
LT
2849 ntohl(state->im->multiaddr),
2850 ntohl(psf->sf_inaddr),
2851 psf->sf_count[MCAST_INCLUDE],
2852 psf->sf_count[MCAST_EXCLUDE]);
2853 }
2854 return 0;
2855}
2856
f690808e 2857static const struct seq_operations igmp_mcf_seq_ops = {
1da177e4
LT
2858 .start = igmp_mcf_seq_start,
2859 .next = igmp_mcf_seq_next,
2860 .stop = igmp_mcf_seq_stop,
2861 .show = igmp_mcf_seq_show,
2862};
2863
2864static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
2865{
7091e728 2866 return seq_open_net(inode, file, &igmp_mcf_seq_ops,
cf7732e4 2867 sizeof(struct igmp_mcf_iter_state));
1da177e4
LT
2868}
2869
9a32144e 2870static const struct file_operations igmp_mcf_seq_fops = {
1da177e4
LT
2871 .owner = THIS_MODULE,
2872 .open = igmp_mcf_seq_open,
2873 .read = seq_read,
2874 .llseek = seq_lseek,
7091e728 2875 .release = seq_release_net,
1da177e4
LT
2876};
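/*
 * Annotation (not part of the original source): these operations back
 * /proc/net/mcfilter, which prints one row per (device, group, source) with
 * the INCLUDE and EXCLUDE reference counts shown by igmp_mcf_seq_show().
 */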
2877
2c8c1e72 2878static int __net_init igmp_net_init(struct net *net)
1da177e4 2879{
7091e728 2880 struct proc_dir_entry *pde;
93a714d6 2881 int err;
7091e728 2882
d4beaa66 2883 pde = proc_create("igmp", S_IRUGO, net->proc_net, &igmp_mc_seq_fops);
7091e728
AD
2884 if (!pde)
2885 goto out_igmp;
d4beaa66
G
2886 pde = proc_create("mcfilter", S_IRUGO, net->proc_net,
2887 &igmp_mcf_seq_fops);
7091e728
AD
2888 if (!pde)
2889 goto out_mcfilter;
93a714d6
MC
2890 err = inet_ctl_sock_create(&net->ipv4.mc_autojoin_sk, AF_INET,
2891 SOCK_DGRAM, 0, net);
2892 if (err < 0) {
2893 pr_err("Failed to initialize the IGMP autojoin socket (err %d)\n",
2894 err);
2895 goto out_sock;
2896 }
2897
1da177e4 2898 return 0;
7091e728 2899
93a714d6
MC
2900out_sock:
2901 remove_proc_entry("mcfilter", net->proc_net);
7091e728 2902out_mcfilter:
ece31ffd 2903 remove_proc_entry("igmp", net->proc_net);
7091e728
AD
2904out_igmp:
2905 return -ENOMEM;
2906}
2907
2c8c1e72 2908static void __net_exit igmp_net_exit(struct net *net)
7091e728 2909{
ece31ffd
G
2910 remove_proc_entry("mcfilter", net->proc_net);
2911 remove_proc_entry("igmp", net->proc_net);
93a714d6 2912 inet_ctl_sock_destroy(net->ipv4.mc_autojoin_sk);
7091e728
AD
2913}
2914
2915static struct pernet_operations igmp_net_ops = {
2916 .init = igmp_net_init,
2917 .exit = igmp_net_exit,
2918};
72c1d3bd 2919#endif
7091e728 2920
4aa5dee4
JP
2921static int igmp_netdev_event(struct notifier_block *this,
2922 unsigned long event, void *ptr)
2923{
2924 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2925 struct in_device *in_dev;
2926
2927 switch (event) {
2928 case NETDEV_RESEND_IGMP:
2929 in_dev = __in_dev_get_rtnl(dev);
2930 if (in_dev)
2931 ip_mc_rejoin_groups(in_dev);
2932 break;
2933 default:
2934 break;
2935 }
2936 return NOTIFY_DONE;
2937}
2938
2939static struct notifier_block igmp_notifier = {
2940 .notifier_call = igmp_netdev_event,
2941};
2942
72c1d3bd 2943int __init igmp_mc_init(void)
7091e728 2944{
72c1d3bd 2945#if defined(CONFIG_PROC_FS)
4aa5dee4
JP
2946 int err;
2947
2948 err = register_pernet_subsys(&igmp_net_ops);
2949 if (err)
2950 return err;
2951 err = register_netdevice_notifier(&igmp_notifier);
2952 if (err)
2953 goto reg_notif_fail;
2954 return 0;
2955
2956reg_notif_fail:
2957 unregister_pernet_subsys(&igmp_net_ops);
2958 return err;
72c1d3bd
WC
2959#else
2960 return register_netdevice_notifier(&igmp_notifier);
1da177e4 2961#endif
72c1d3bd 2962}