Merge branch 'master'
[deliverable/linux.git] / drivers / infiniband / ulp / ipoib / ipoib_multicast.c
1 /*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 * $Id: ipoib_multicast.c 1362 2004-12-18 15:56:29Z roland $
35 */
36
37 #include <linux/skbuff.h>
38 #include <linux/rtnetlink.h>
39 #include <linux/ip.h>
40 #include <linux/in.h>
41 #include <linux/igmp.h>
42 #include <linux/inetdevice.h>
43 #include <linux/delay.h>
44 #include <linux/completion.h>
45
46 #include "ipoib.h"
47
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
/* Runtime-tunable (0644) verbosity knob for ipoib_dbg_mcast() tracing. */
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

/*
 * Serializes queueing/cancelling of priv->mcast_task against the
 * IPOIB_MCAST_RUN flag (see ipoib_mcast_start_thread() /
 * ipoib_mcast_stop_thread() and the join completion handlers).
 */
static DECLARE_MUTEX(mcast_mutex);
57
58 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
struct ipoib_mcast {
	struct ib_sa_mcmember_rec mcmember;	/* SA MCMemberRecord from the join reply */
	struct ipoib_ah	 *ah;			/* address handle; non-NULL once join finished */

	struct rb_node    rb_node;		/* node in priv->multicast_tree, keyed by MGID */
	struct list_head  list;			/* entry on priv->multicast_list (or a remove list) */
	struct completion done;			/* completed when an SA query finishes or is cancelled */

	int                 query_id;		/* id of the outstanding SA query, for cancellation */
	struct ib_sa_query *query;		/* outstanding SA query; NULL when none in flight */

	unsigned long created;			/* jiffies at allocation time (debugfs reporting) */
	unsigned long backoff;			/* join retry delay in seconds, doubled per failure,
						 * capped at IPOIB_MAX_BACKOFF_SECONDS */

	unsigned long flags;			/* IPOIB_MCAST_FLAG_* bits (SENDONLY/BUSY/ATTACHED/FOUND) */
	unsigned char logcount;			/* limits join-failure log spam to 20 messages */

	struct list_head neigh_list;		/* ipoib_neigh entries sending to this group */

	struct sk_buff_head pkt_queue;		/* packets queued while the join is in progress */

	struct net_device *dev;			/* owning IPoIB net device */
};
82
/*
 * Snapshot-based iterator over the multicast RB tree, used by the
 * debugfs code (CONFIG_INFINIBAND_IPOIB_DEBUG).  Iteration position is
 * the last-seen MGID, so the tree lock need not be held between steps.
 */
struct ipoib_mcast_iter {
	struct net_device *dev;		/* device whose tree is being walked */
	union ib_gid       mgid;	/* MGID of the last group returned */
	unsigned long      created;	/* copies of the group's fields, taken */
	unsigned int       queuelen;	/* under priv->lock by _iter_next()    */
	unsigned int       complete;
	unsigned int       send_only;
};
91
/*
 * Tear down and free a multicast group entry: detach all neighbour
 * cache entries pointing at it, drop all AH references, and discard
 * any packets still queued waiting for the join to finish.
 *
 * Caller must have already unlinked @mcast from priv->multicast_list
 * and priv->multicast_tree.
 */
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tmp;
	unsigned long flags;
	LIST_HEAD(ah_list);
	struct ipoib_ah *ah, *tah;

	ipoib_dbg_mcast(netdev_priv(dev),
			"deleting multicast group " IPOIB_GID_FMT "\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid));

	spin_lock_irqsave(&priv->lock, flags);

	/*
	 * Unhook each neighbour from the core neighbour entry and collect
	 * its AH on a private list; the actual puts are deferred until
	 * after the lock is dropped (presumably because ipoib_put_ah()
	 * must not run under priv->lock -- TODO confirm against ipoib.h).
	 */
	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
		if (neigh->ah)
			list_add_tail(&neigh->ah->list, &ah_list);
		*to_ipoib_neigh(neigh->neighbour) = NULL;
		neigh->neighbour->ops->destructor = NULL;
		kfree(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Drop the neighbour AH references collected above. */
	list_for_each_entry_safe(ah, tah, &ah_list, list)
		ipoib_put_ah(ah);

	/* Drop the group's own AH, if the join ever completed. */
	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	/* Free any packets that never made it out. */
	while (!skb_queue_empty(&mcast->pkt_queue))
		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));

	kfree(mcast);
}
128
129 static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
130 int can_sleep)
131 {
132 struct ipoib_mcast *mcast;
133
134 mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
135 if (!mcast)
136 return NULL;
137
138 init_completion(&mcast->done);
139
140 mcast->dev = dev;
141 mcast->created = jiffies;
142 mcast->backoff = 1;
143 mcast->logcount = 0;
144
145 INIT_LIST_HEAD(&mcast->list);
146 INIT_LIST_HEAD(&mcast->neigh_list);
147 skb_queue_head_init(&mcast->pkt_queue);
148
149 mcast->ah = NULL;
150 mcast->query = NULL;
151
152 return mcast;
153 }
154
155 static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, union ib_gid *mgid)
156 {
157 struct ipoib_dev_priv *priv = netdev_priv(dev);
158 struct rb_node *n = priv->multicast_tree.rb_node;
159
160 while (n) {
161 struct ipoib_mcast *mcast;
162 int ret;
163
164 mcast = rb_entry(n, struct ipoib_mcast, rb_node);
165
166 ret = memcmp(mgid->raw, mcast->mcmember.mgid.raw,
167 sizeof (union ib_gid));
168 if (ret < 0)
169 n = n->rb_left;
170 else if (ret > 0)
171 n = n->rb_right;
172 else
173 return mcast;
174 }
175
176 return NULL;
177 }
178
179 static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
180 {
181 struct ipoib_dev_priv *priv = netdev_priv(dev);
182 struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;
183
184 while (*n) {
185 struct ipoib_mcast *tmcast;
186 int ret;
187
188 pn = *n;
189 tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);
190
191 ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
192 sizeof (union ib_gid));
193 if (ret < 0)
194 n = &pn->rb_left;
195 else if (ret > 0)
196 n = &pn->rb_right;
197 else
198 return -EEXIST;
199 }
200
201 rb_link_node(&mcast->rb_node, pn, n);
202 rb_insert_color(&mcast->rb_node, &priv->multicast_tree);
203
204 return 0;
205 }
206
/*
 * Complete a successful multicast join: record the SA's member record,
 * attach the QP to the group (unless send-only), build the address
 * handle, and flush out any packets that were queued while the join
 * was in progress.  Returns 0 on success or a negative errno if the
 * QP attach fails.
 */
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	mcast->mcmember = *mcmember;

	/* Set the cached Q_Key before we attach if it's the broadcast group */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		/* ATTACHED also doubles as "QP is attached"; a second finish
		 * for the same group is treated as benign and succeeds. */
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group " IPOIB_GID_FMT
				   " already attached\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));

			return 0;
		}

		ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group "
				   IPOIB_GID_FMT "\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));

			/* roll back the flag set above so a retry can attach */
			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	{
		/* Build the UD address vector from the member record. */
		struct ib_ah_attr av = {
			.dlid	       = be16_to_cpu(mcast->mcmember.mlid),
			.port_num      = priv->port,
			.sl	       = mcast->mcmember.sl,
			.ah_flags      = IB_AH_GRH,
			.grh	       = {
				.flow_label    = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit     = mcast->mcmember.hop_limit,
				.sgid_index    = 0,
				.traffic_class = mcast->mcmember.traffic_class
			}
		};
		int path_rate = ib_sa_rate_enum_to_int(mcast->mcmember.rate);

		av.grh.dgid = mcast->mcmember.mgid;

		/* Throttle to the group rate when our local port is faster. */
		if (path_rate > 0 && priv->local_rate > path_rate)
			av.static_rate = (priv->local_rate - 1) / path_rate;

		ipoib_dbg_mcast(priv, "static_rate %d for local port %dX, mcmember %dX\n",
				av.static_rate, priv->local_rate,
				ib_sa_rate_enum_to_int(mcast->mcmember.rate));

		/* Note: AH creation failure is only warned about; the
		 * queued packets below are then left pending. */
		mcast->ah = ipoib_create_ah(dev, priv->pd, &av);
		if (!mcast->ah) {
			ipoib_warn(priv, "ib_address_create failed\n");
		} else {
			ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT
					" AV %p, LID 0x%04x, SL %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid),
					mcast->ah->ah,
					be16_to_cpu(mcast->mcmember.mlid),
					mcast->mcmember.sl);
		}
	}

	/* actually send any queued packets */
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);

		skb->dev = dev;

		if (!skb->dst || !skb->dst->neighbour) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof (struct ipoib_pseudoheader));
		}

		/* Re-inject through the stack so the normal xmit path
		 * resolves the (now joined) group. */
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
	}

	return 0;
}
298
/*
 * SA callback for a send-only join started by ipoib_mcast_sendonly_join().
 * On success, finish the join (build AH, flush queued packets); on
 * failure, drop the queued packets and clear BUSY so a later send can
 * retry.  Always completes mcast->done so ipoib_mcast_stop_thread()
 * can stop waiting on this group.
 */
static void
ipoib_mcast_sendonly_join_complete(int status,
				   struct ib_sa_mcmember_rec *mcmember,
				   void *mcast_ptr)
{
	struct ipoib_mcast *mcast = mcast_ptr;
	struct net_device *dev = mcast->dev;

	if (!status)
		ipoib_mcast_join_finish(mcast, mcmember);
	else {
		/* logcount caps the debug spam at 20 messages per group */
		if (mcast->logcount++ < 20)
			ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for "
					IPOIB_GID_FMT ", status %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid), status);

		/* Flush out any queued packets */
		while (!skb_queue_empty(&mcast->pkt_queue))
			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));

		/* Clear the busy flag so we try again */
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	}

	complete(&mcast->done);
}
325
/*
 * Start an asynchronous send-only join for @mcast (called from the xmit
 * path when a packet arrives for a group we only transmit to).  Returns
 * 0 or a positive query id on success, -ENODEV if the device is going
 * down, -EBUSY if a join is already in flight, or the ib_sa error.
 * Completion is handled by ipoib_mcast_sendonly_join_complete().
 */
static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
#if 0				/* Some SMs don't support send-only yet */
		.join_state = 4
#else
		.join_state = 1
#endif
	};
	int ret = 0;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
		return -ENODEV;
	}

	/* BUSY stays set until the completion handler clears it on failure */
	if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
		return -EBUSY;
	}

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
				     IB_SA_MCMEMBER_REC_MGID	|
				     IB_SA_MCMEMBER_REC_PORT_GID |
				     IB_SA_MCMEMBER_REC_PKEY	|
				     IB_SA_MCMEMBER_REC_JOIN_STATE,
				     1000, GFP_ATOMIC,
				     ipoib_mcast_sendonly_join_complete,
				     mcast, &mcast->query);
	if (ret < 0) {
		ipoib_warn(priv, "ib_sa_mcmember_rec_set failed (ret = %d)\n",
			   ret);
	} else {
		ipoib_dbg_mcast(priv, "no multicast record for " IPOIB_GID_FMT
				", starting join\n",
				IPOIB_GID_ARG(mcast->mcmember.mgid));

		/* remember the query id so stop_thread can cancel it */
		mcast->query_id = ret;
	}

	return ret;
}
374
/*
 * SA callback for the full (non-send-only) joins started by
 * ipoib_mcast_join().  On success, requeue the join task to process
 * the next unjoined group.  On failure, back off exponentially and
 * reschedule (immediately for a timeout) while the IPOIB_MCAST_RUN
 * flag is still set; mcast->done is completed whenever no retry is
 * queued so stop_thread's wait_for_completion() can finish.
 */
static void ipoib_mcast_join_complete(int status,
				      struct ib_sa_mcmember_rec *mcmember,
				      void *mcast_ptr)
{
	struct ipoib_mcast *mcast = mcast_ptr;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "join completion for " IPOIB_GID_FMT
			" (status %d)\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid), status);

	if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
		mcast->backoff = 1;	/* reset backoff after a good join */
		down(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_work(ipoib_workqueue, &priv->mcast_task);
		up(&mcast_mutex);
		complete(&mcast->done);
		return;
	}

	/* -EINTR means the query was cancelled (e.g. by stop_thread):
	 * just signal completion, no retry and no logging. */
	if (status == -EINTR) {
		complete(&mcast->done);
		return;
	}

	if (status && mcast->logcount++ < 20) {
		if (status == -ETIMEDOUT || status == -EINTR) {
			ipoib_dbg_mcast(priv, "multicast join failed for " IPOIB_GID_FMT
					", status %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid),
					status);
		} else {
			ipoib_warn(priv, "multicast join failed for "
				   IPOIB_GID_FMT ", status %d\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid),
				   status);
		}
	}

	/* exponential backoff, capped */
	mcast->backoff *= 2;
	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

	mcast->query = NULL;

	down(&mcast_mutex);
	if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
		/* a timeout retries at once; other errors wait out the backoff */
		if (status == -ETIMEDOUT)
			queue_work(ipoib_workqueue, &priv->mcast_task);
		else
			queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
					   mcast->backoff * HZ);
	} else
		complete(&mcast->done);
	up(&mcast_mutex);

	return;
}
435
/*
 * Start an asynchronous full-member join for @mcast.  When @create is
 * non-zero the request also carries Q_Key/SL/flow-label/traffic-class
 * copied from the broadcast group, so the SM will create the group if
 * it does not exist.  On submission failure, back off and reschedule
 * the join task directly (no completion callback will run).
 */
static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
			     int create)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	ib_sa_comp_mask comp_mask;
	int ret = 0;

	ipoib_dbg_mcast(priv, "joining MGID " IPOIB_GID_FMT "\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid));

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	comp_mask =
		IB_SA_MCMEMBER_REC_MGID		|
		IB_SA_MCMEMBER_REC_PORT_GID	|
		IB_SA_MCMEMBER_REC_PKEY		|
		IB_SA_MCMEMBER_REC_JOIN_STATE;

	if (create) {
		comp_mask |=
			IB_SA_MCMEMBER_REC_QKEY		|
			IB_SA_MCMEMBER_REC_SL		|
			IB_SA_MCMEMBER_REC_FLOW_LABEL	|
			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;

		/* inherit group parameters from the broadcast group;
		 * the join task only gets here after broadcast is joined */
		rec.qkey	  = priv->broadcast->mcmember.qkey;
		rec.sl		  = priv->broadcast->mcmember.sl;
		rec.flow_label	  = priv->broadcast->mcmember.flow_label;
		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
	}

	ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, comp_mask,
				     mcast->backoff * 1000, GFP_ATOMIC,
				     ipoib_mcast_join_complete,
				     mcast, &mcast->query);

	if (ret < 0) {
		ipoib_warn(priv, "ib_sa_mcmember_rec_set failed, status %d\n", ret);

		/* same exponential backoff as the completion handler */
		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

		down(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task,
					   mcast->backoff * HZ);
		up(&mcast_mutex);
	} else
		mcast->query_id = ret;
}
493
/*
 * Workqueue handler driving the multicast join state machine.  Each
 * invocation makes one step: refresh local GID/LID/rate, make sure the
 * broadcast group exists and is joined, then start the join of the next
 * unjoined group and return (the join completion requeues this task).
 * Only when everything is joined does it set the MTU, clear
 * IPOIB_MCAST_RUN and turn the carrier on.
 */
void ipoib_mcast_join_task(void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
		return;

	/* Refresh the port GID; it forms bytes 4..19 of the hw address. */
	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
		ipoib_warn(priv, "ib_gid_entry_get() failed\n");
	else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	{
		struct ib_port_attr attr;

		if (!ib_query_port(priv->ca, priv->port, &attr)) {
			priv->local_lid  = attr.lid;
			/* local_rate is in 1x-link multiples: speed x width */
			priv->local_rate = attr.active_speed *
				ib_width_enum_to_int(attr.active_width);
		} else
			ipoib_warn(priv, "ib_query_port failed\n");
	}

	if (!priv->broadcast) {
		/* First pass: create the broadcast group entry. */
		priv->broadcast = ipoib_mcast_alloc(dev, 1);
		if (!priv->broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			down(&mcast_mutex);
			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
				queue_delayed_work(ipoib_workqueue,
						   &priv->mcast_task, HZ);
			up(&mcast_mutex);
			return;
		}

		/* The broadcast MGID lives at offset 4 of the hw broadcast
		 * address, just like the GID in a unicast hw address. */
		memcpy(priv->broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));

		spin_lock_irq(&priv->lock);
		__ipoib_mcast_add(dev, priv->broadcast);
		spin_unlock_irq(&priv->lock);
	}

	/* The broadcast group must be joined before any other group,
	 * since the others inherit its Q_Key/SL parameters. */
	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		ipoib_mcast_join(dev, priv->broadcast, 0);
		return;
	}

	while (1) {
		struct ipoib_mcast *mcast = NULL;

		spin_lock_irq(&priv->lock);
		list_for_each_entry(mcast, &priv->multicast_list, list) {
			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
				/* Found the next unjoined group */
				break;
			}
		}
		spin_unlock_irq(&priv->lock);

		/* loop cursor landed back on the head: no unjoined group left */
		if (&mcast->list == &priv->multicast_list) {
			/* All done */
			break;
		}

		/* One join at a time; completion requeues this task. */
		ipoib_mcast_join(dev, mcast, 1);
		return;
	}

	priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
		IPOIB_ENCAP_LEN;
	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	netif_carrier_on(dev);
}
575
576 int ipoib_mcast_start_thread(struct net_device *dev)
577 {
578 struct ipoib_dev_priv *priv = netdev_priv(dev);
579
580 ipoib_dbg_mcast(priv, "starting multicast thread\n");
581
582 down(&mcast_mutex);
583 if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
584 queue_work(ipoib_workqueue, &priv->mcast_task);
585 up(&mcast_mutex);
586
587 return 0;
588 }
589
/*
 * Stop the multicast join state machine: clear IPOIB_MCAST_RUN, cancel
 * the pending join task (flushing the workqueue if @flush), then cancel
 * every outstanding SA query and wait for its completion handler to
 * finish.  Always returns 0.
 */
int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	down(&mcast_mutex);
	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	cancel_delayed_work(&priv->mcast_task);
	up(&mcast_mutex);

	/* make sure a currently-running join task has finished */
	if (flush)
		flush_workqueue(ipoib_workqueue);

	/* The broadcast group's query is cancelled first; the cancelled
	 * query completes with -EINTR, which signals mcast->done. */
	if (priv->broadcast && priv->broadcast->query) {
		ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query);
		priv->broadcast->query = NULL;
		ipoib_dbg_mcast(priv, "waiting for bcast\n");
		wait_for_completion(&priv->broadcast->done);
	}

	list_for_each_entry(mcast, &priv->multicast_list, list) {
		if (mcast->query) {
			ib_sa_cancel_query(mcast->query_id, mcast->query);
			mcast->query = NULL;
			ipoib_dbg_mcast(priv, "waiting for MGID " IPOIB_GID_FMT "\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid));
			wait_for_completion(&mcast->done);
		}
	}

	return 0;
}
624
/*
 * Leave a multicast group: detach the QP and fire a best-effort SA
 * delete request (no completion handler, no retry).  A no-op if the
 * group was never attached.  Always returns 0.
 */
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	int ret = 0;

	/* only leave what we actually joined/attached */
	if (!test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags))
		return 0;

	ipoib_dbg_mcast(priv, "leaving MGID " IPOIB_GID_FMT "\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid));

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = cpu_to_be16(priv->pkey);

	/* Remove ourselves from the multicast group */
	ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid),
				 &mcast->mcmember.mgid);
	if (ret)
		ipoib_warn(priv, "ipoib_mcast_detach failed (result = %d)\n", ret);

	/*
	 * Just make one shot at leaving and don't wait for a reply;
	 * if we fail, too bad.
	 */
	ret = ib_sa_mcmember_rec_delete(priv->ca, priv->port, &rec,
					IB_SA_MCMEMBER_REC_MGID		|
					IB_SA_MCMEMBER_REC_PORT_GID	|
					IB_SA_MCMEMBER_REC_PKEY		|
					IB_SA_MCMEMBER_REC_JOIN_STATE,
					0, GFP_ATOMIC, NULL,
					mcast, &mcast->query);
	if (ret < 0)
		ipoib_warn(priv, "ib_sa_mcmember_rec_delete failed "
			   "for leave (result = %d)\n", ret);

	return 0;
}
666
/*
 * Transmit-path entry for multicast packets.  Looks up (or creates, as
 * a send-only group) the entry for @mgid.  If the join has not finished
 * yet (no AH), the skb is queued (bounded by IPOIB_MAX_MCAST_QUEUE) and
 * a send-only join is kicked off; otherwise the packet is sent and the
 * core neighbour entry is bound to the group's AH for future sends.
 */
void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
		      struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for "
				IPOIB_GID_FMT "\n", IPOIB_GID_ARG(*mgid));

		/* atomic context: can_sleep = 0 */
		mcast = ipoib_mcast_alloc(dev, 0);
		if (!mcast) {
			ipoib_warn(priv, "unable to allocate memory for "
				   "multicast structure\n");
			dev_kfree_skb_any(skb);
			goto out;
		}

		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		mcast->mcmember.mgid = *mgid;
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	}

	if (!mcast->ah) {
		/* join not finished yet: queue (or drop) and ensure a
		 * send-only join is in flight */
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
			skb_queue_tail(&mcast->pkt_queue, skb);
		else
			dev_kfree_skb_any(skb);

		if (mcast->query)
			ipoib_dbg_mcast(priv, "no address vector, "
					"but multicast join already started\n");
		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			ipoib_mcast_sendonly_join(mcast);

		/*
		 * If lookup completes between here and out:, don't
		 * want to send packet twice.
		 */
		mcast = NULL;
	}

out:
	if (mcast && mcast->ah) {
		/* Cache the AH on the neighbour entry so later packets
		 * can skip this path (first binding only). */
		if (skb->dst            &&
		    skb->dst->neighbour &&
		    !*to_ipoib_neigh(skb->dst->neighbour)) {
			struct ipoib_neigh *neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);

			if (neigh) {
				kref_get(&mcast->ah->ref);
				neigh->ah	= mcast->ah;
				neigh->neighbour = skb->dst->neighbour;
				*to_ipoib_neigh(skb->dst->neighbour) = neigh;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}

		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
	}

	spin_unlock(&priv->lock);
}
739
/*
 * Flush all multicast state for the device (e.g. on a fabric event).
 * Each existing group is swapped in place — under priv->lock — for a
 * freshly allocated entry carrying only the MGID (and the SENDONLY
 * flag), so lookups keep working while the old joined/attached entries
 * are left and freed outside the lock.
 */
void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast, *nmcast;
	unsigned long flags;

	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		nmcast = ipoib_mcast_alloc(dev, 0);
		if (nmcast) {
			/* preserve only the send-only property */
			nmcast->flags =
				mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY);

			nmcast->mcmember.mgid = mcast->mcmember.mgid;

			/* Add the new group in before the to-be-destroyed group */
			list_add_tail(&nmcast->list, &mcast->list);
			list_del_init(&mcast->list);

			/* swap the tree node in place — no rebalancing needed
			 * since old and new entries share the same MGID key */
			rb_replace_node(&mcast->rb_node, &nmcast->rb_node,
					&priv->multicast_tree);

			list_add_tail(&mcast->list, &remove_list);
		} else {
			/* allocation failed: old entry is kept as-is */
			ipoib_warn(priv, "could not reallocate multicast group "
				   IPOIB_GID_FMT "\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));
		}
	}

	/* Same swap for the broadcast group, which is not on
	 * multicast_list; on allocation failure priv->broadcast goes
	 * NULL and the join task will recreate it. */
	if (priv->broadcast) {
		nmcast = ipoib_mcast_alloc(dev, 0);
		if (nmcast) {
			nmcast->mcmember.mgid = priv->broadcast->mcmember.mgid;

			rb_replace_node(&priv->broadcast->rb_node,
					&nmcast->rb_node,
					&priv->multicast_tree);

			list_add_tail(&priv->broadcast->list, &remove_list);
		}

		priv->broadcast = nmcast;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* leave + free outside the lock (both may issue verbs/SA calls) */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}
795
/*
 * Called when the interface goes down: destroy the broadcast group
 * entry (it will be recreated by the join task on the next up).  Only
 * the tree unlink runs under priv->lock; leave/free happen outside it,
 * matching ipoib_mcast_dev_flush().
 */
void ipoib_mcast_dev_down(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/* Delete broadcast since it will be recreated */
	if (priv->broadcast) {
		ipoib_dbg_mcast(priv, "deleting broadcast group\n");

		spin_lock_irqsave(&priv->lock, flags);
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_mcast_leave(dev, priv->broadcast);
		ipoib_mcast_free(priv->broadcast);
		priv->broadcast = NULL;
	}
}
813
/*
 * Workqueue handler run when the net core's multicast list changes
 * (set_multicast_list).  Reconciles our RB tree / multicast_list with
 * dev->mc_list: new addresses get fresh entries, stale full-member
 * entries are torn down, and send-only entries are upgraded to full
 * members by replacement.  Finally restarts the join task if the
 * interface is administratively up.
 */
void ipoib_mcast_restart_task(void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct dev_mc_list *mclist;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	unsigned long flags;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	/* quiesce the join machinery while we rewrite the lists
	 * (flush == 0: do not flush the workqueue we are running on) */
	ipoib_mcast_stop_thread(dev, 0);

	spin_lock_irqsave(&priv->lock, flags);

	/*
	 * Unfortunately, the networking core only gives us a list of all of
	 * the multicast hardware addresses. We need to figure out which ones
	 * are new and which ones have been removed
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark all of the entries that are found or don't exist */
	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
		union ib_gid mgid;

		/* the MGID is bytes 4..19 of the 20-byte hw address */
		memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);

		/* Add in the P_Key */
		mgid.raw[4] = (priv->pkey >> 8) & 0xff;
		mgid.raw[5] = priv->pkey & 0xff;

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid "
					IPOIB_GID_FMT "\n", IPOIB_GID_ARG(mgid));

			nmcast = ipoib_mcast_alloc(dev, 0);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send only entry */
				list_del(&mcast->list);
				list_add_tail(&mcast->list, &remove_list);

				/* in-place key-equal swap, no rebalance needed */
				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries that don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group " IPOIB_GID_FMT "\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid));

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_del(&mcast->list);
			list_add_tail(&mcast->list, &remove_list);
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* We have to cancel outside of the spinlock */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}

	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		ipoib_mcast_start_thread(dev);
}
910
911 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
912
913 struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
914 {
915 struct ipoib_mcast_iter *iter;
916
917 iter = kmalloc(sizeof *iter, GFP_KERNEL);
918 if (!iter)
919 return NULL;
920
921 iter->dev = dev;
922 memset(iter->mgid.raw, 0, 16);
923
924 if (ipoib_mcast_iter_next(iter)) {
925 kfree(iter);
926 return NULL;
927 }
928
929 return iter;
930 }
931
932 int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
933 {
934 struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
935 struct rb_node *n;
936 struct ipoib_mcast *mcast;
937 int ret = 1;
938
939 spin_lock_irq(&priv->lock);
940
941 n = rb_first(&priv->multicast_tree);
942
943 while (n) {
944 mcast = rb_entry(n, struct ipoib_mcast, rb_node);
945
946 if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
947 sizeof (union ib_gid)) < 0) {
948 iter->mgid = mcast->mcmember.mgid;
949 iter->created = mcast->created;
950 iter->queuelen = skb_queue_len(&mcast->pkt_queue);
951 iter->complete = !!mcast->ah;
952 iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));
953
954 ret = 0;
955
956 break;
957 }
958
959 n = rb_next(n);
960 }
961
962 spin_unlock_irq(&priv->lock);
963
964 return ret;
965 }
966
967 void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
968 union ib_gid *mgid,
969 unsigned long *created,
970 unsigned int *queuelen,
971 unsigned int *complete,
972 unsigned int *send_only)
973 {
974 *mgid = iter->mgid;
975 *created = iter->created;
976 *queuelen = iter->queuelen;
977 *complete = iter->complete;
978 *send_only = iter->send_only;
979 }
980
981 #endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
This page took 0.054122 seconds and 6 git commands to generate.