ipoib: Need to do dst_neigh_lookup_skb() outside of priv->lock.
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/slab.h>

#include <net/dst.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

static DEFINE_MUTEX(mcast_mutex);

struct ipoib_mcast_iter {
	struct net_device *dev;
	union ib_gid mgid;
	unsigned long created;
	unsigned int queuelen;
	unsigned int complete;
	unsigned int send_only;
};

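/*
 * Free a multicast group entry: drop any neighbour AH references held
 * under priv->lock, drop the group's own AH, and discard (counting
 * them as tx_dropped) any packets still queued while the join was
 * pending.
 */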
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tmp;
	int tx_dropped = 0;

	ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group %pI6\n",
			mcast->mcmember.mgid.raw);

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that mcast->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irq(&priv->lock);

	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	while (!skb_queue_empty(&mcast->pkt_queue)) {
		++tx_dropped;
		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
	}

	netif_tx_lock_bh(dev);
	dev->stats.tx_dropped += tx_dropped;
	netif_tx_unlock_bh(dev);

	kfree(mcast);
}

static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
					     int can_sleep)
{
	struct ipoib_mcast *mcast;

	mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!mcast)
		return NULL;

	mcast->dev = dev;
	mcast->created = jiffies;
	mcast->backoff = 1;

	INIT_LIST_HEAD(&mcast->list);
	INIT_LIST_HEAD(&mcast->neigh_list);
	skb_queue_head_init(&mcast->pkt_queue);

	return mcast;
}

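/*
 * Look up a multicast group by MGID in the per-device rb-tree.
 * Called with priv->lock held.
 */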
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->multicast_tree.rb_node;

	while (n) {
		struct ipoib_mcast *mcast;
		int ret;

		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		ret = memcmp(mgid, mcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return mcast;
	}

	return NULL;
}

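/*
 * Insert a multicast group into the rb-tree, keyed by MGID.  Returns
 * -EEXIST if a group with the same MGID is already present.
 */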
static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;

	while (*n) {
		struct ipoib_mcast *tmcast;
		int ret;

		pn = *n;
		tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);

		ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &priv->multicast_tree);

	return 0;
}

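/*
 * Finish a successful join: save the returned member record, cache the
 * broadcast group's Q_Key, attach our QP to the group (unless it is a
 * send-only group), build the multicast address handle and then push
 * out any packets that were queued while the join was in progress.
 */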
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah;
	int ret;
	int set_qkey = 0;

	mcast->mcmember = *mcmember;

	/* Set the cached Q_Key before we attach if it's the broadcast group */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		spin_lock_irq(&priv->lock);
		if (!priv->broadcast) {
			spin_unlock_irq(&priv->lock);
			return -EAGAIN;
		}
		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		spin_unlock_irq(&priv->lock);
		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
		set_qkey = 1;
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group %pI6 already attached\n",
				   mcast->mcmember.mgid.raw);

			return 0;
		}

		ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid, set_qkey);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
				   mcast->mcmember.mgid.raw);

			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	{
		struct ib_ah_attr av = {
			.dlid = be16_to_cpu(mcast->mcmember.mlid),
			.port_num = priv->port,
			.sl = mcast->mcmember.sl,
			.ah_flags = IB_AH_GRH,
			.static_rate = mcast->mcmember.rate,
			.grh = {
				.flow_label = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit = mcast->mcmember.hop_limit,
				.sgid_index = 0,
				.traffic_class = mcast->mcmember.traffic_class
			}
		};
		av.grh.dgid = mcast->mcmember.mgid;

		ah = ipoib_create_ah(dev, priv->pd, &av);
		if (IS_ERR(ah)) {
			ipoib_warn(priv, "ib_address_create failed %ld\n",
				   -PTR_ERR(ah));
			/* use original error */
			return PTR_ERR(ah);
		} else {
			spin_lock_irq(&priv->lock);
			mcast->ah = ah;
			spin_unlock_irq(&priv->lock);

			ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
					mcast->mcmember.mgid.raw,
					mcast->ah->ah,
					be16_to_cpu(mcast->mcmember.mlid),
					mcast->mcmember.sl);
		}
	}

	/* actually send any queued packets */
	netif_tx_lock_bh(dev);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);

		netif_tx_unlock_bh(dev);

		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");

		netif_tx_lock_bh(dev);
	}
	netif_tx_unlock_bh(dev);

	return 0;
}

static int
ipoib_mcast_sendonly_join_complete(int status,
				   struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (status) {
		if (mcast->logcount++ < 20)
			ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
					mcast->mcmember.mgid.raw, status);

		/* Flush out any queued packets */
		netif_tx_lock_bh(dev);
		while (!skb_queue_empty(&mcast->pkt_queue)) {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
		}
		netif_tx_unlock_bh(dev);

		/* Clear the busy flag so we try again */
		status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
					    &mcast->flags);
	}
	return status;
}

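/*
 * Start an asynchronous join for a send-only group, i.e. one we only
 * transmit to.  The BUSY flag guards against issuing a second join
 * while one is already outstanding.
 */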
static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
#if 0	/* Some SMs don't support send-only yet */
		.join_state = 4
#else
		.join_state = 1
#endif
	};
	int ret = 0;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
		return -ENODEV;
	}

	if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
		return -EBUSY;
	}

	rec.mgid = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey = cpu_to_be16(priv->pkey);

	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
					 priv->port, &rec,
					 IB_SA_MCMEMBER_REC_MGID |
					 IB_SA_MCMEMBER_REC_PORT_GID |
					 IB_SA_MCMEMBER_REC_PKEY |
					 IB_SA_MCMEMBER_REC_JOIN_STATE,
					 GFP_ATOMIC,
					 ipoib_mcast_sendonly_join_complete,
					 mcast);
	if (IS_ERR(mcast->mc)) {
		ret = PTR_ERR(mcast->mc);
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
			   ret);
	} else {
		ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
				mcast->mcmember.mgid.raw);
	}

	return ret;
}

void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   carrier_on_task);
	struct ib_port_attr attr;

	/*
	 * Take rtnl_lock to avoid racing with ipoib_stop() and
	 * turning the carrier back on while a device is being
	 * removed.
	 */
	if (ib_query_port(priv->ca, priv->port, &attr) ||
	    attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
		return;
	}

	rtnl_lock();
	netif_carrier_on(priv->dev);
	rtnl_unlock();
}

static int ipoib_mcast_join_complete(int status,
				     struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
			mcast->mcmember.mgid.raw, status);

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (!status) {
		mcast->backoff = 1;
		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task, 0);
		mutex_unlock(&mcast_mutex);

		/*
		 * Defer carrier on work to ipoib_workqueue to avoid a
		 * deadlock on rtnl_lock here.
		 */
		if (mcast == priv->broadcast)
			queue_work(ipoib_workqueue, &priv->carrier_on_task);

		return 0;
	}

	if (mcast->logcount++ < 20) {
		if (status == -ETIMEDOUT || status == -EAGAIN) {
			ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
					mcast->mcmember.mgid.raw, status);
		} else {
			ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
				   mcast->mcmember.mgid.raw, status);
		}
	}

	mcast->backoff *= 2;
	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

	/* Clear the busy flag so we try again */
	status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);

	mutex_lock(&mcast_mutex);
	spin_lock_irq(&priv->lock);
	if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
				   mcast->backoff * HZ);
	spin_unlock_irq(&priv->lock);
	mutex_unlock(&mcast_mutex);

	return status;
}

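/*
 * Start an asynchronous full-member join.  When @create is set, the
 * broadcast group's parameters (Q_Key, MTU, traffic class, rate, SL,
 * flow label, hop limit) are included in the request so that the SM
 * can create the group if it does not already exist.
 */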
static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
			     int create)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	ib_sa_comp_mask comp_mask;
	int ret = 0;

	ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);

	rec.mgid = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey = cpu_to_be16(priv->pkey);

	comp_mask =
		IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID |
		IB_SA_MCMEMBER_REC_PKEY |
		IB_SA_MCMEMBER_REC_JOIN_STATE;

	if (create) {
		comp_mask |=
			IB_SA_MCMEMBER_REC_QKEY |
			IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			IB_SA_MCMEMBER_REC_MTU |
			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
			IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			IB_SA_MCMEMBER_REC_RATE |
			IB_SA_MCMEMBER_REC_SL |
			IB_SA_MCMEMBER_REC_FLOW_LABEL |
			IB_SA_MCMEMBER_REC_HOP_LIMIT;

		rec.qkey = priv->broadcast->mcmember.qkey;
		rec.mtu_selector = IB_SA_EQ;
		rec.mtu = priv->broadcast->mcmember.mtu;
		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
		rec.rate_selector = IB_SA_EQ;
		rec.rate = priv->broadcast->mcmember.rate;
		rec.sl = priv->broadcast->mcmember.sl;
		rec.flow_label = priv->broadcast->mcmember.flow_label;
		rec.hop_limit = priv->broadcast->mcmember.hop_limit;
	}

	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
					 &rec, comp_mask, GFP_KERNEL,
					 ipoib_mcast_join_complete, mcast);
	if (IS_ERR(mcast->mc)) {
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ret = PTR_ERR(mcast->mc);
		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);

		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task,
					   mcast->backoff * HZ);
		mutex_unlock(&mcast_mutex);
	}
}

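/*
 * Work queue task that drives multicast joins: refresh the local GID
 * and LID, make sure the broadcast group is joined first, then walk
 * multicast_list and join one not-yet-attached group at a time (the
 * task is requeued from the join completion and failure paths).
 */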
void ipoib_mcast_join_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, mcast_task.work);
	struct net_device *dev = priv->dev;

	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
		return;

	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
		ipoib_warn(priv, "ib_query_gid() failed\n");
	else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	{
		struct ib_port_attr attr;

		if (!ib_query_port(priv->ca, priv->port, &attr))
			priv->local_lid = attr.lid;
		else
			ipoib_warn(priv, "ib_query_port failed\n");
	}

	if (!priv->broadcast) {
		struct ipoib_mcast *broadcast;

		if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			return;

		broadcast = ipoib_mcast_alloc(dev, 1);
		if (!broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			mutex_lock(&mcast_mutex);
			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
				queue_delayed_work(ipoib_workqueue,
						   &priv->mcast_task, HZ);
			mutex_unlock(&mcast_mutex);
			return;
		}

		spin_lock_irq(&priv->lock);
		memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));
		priv->broadcast = broadcast;

		__ipoib_mcast_add(dev, priv->broadcast);
		spin_unlock_irq(&priv->lock);
	}

	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
			ipoib_mcast_join(dev, priv->broadcast, 0);
		return;
	}

	while (1) {
		struct ipoib_mcast *mcast = NULL;

		spin_lock_irq(&priv->lock);
		list_for_each_entry(mcast, &priv->multicast_list, list) {
			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
				/* Found the next unjoined group */
				break;
			}
		}
		spin_unlock_irq(&priv->lock);

		if (&mcast->list == &priv->multicast_list) {
			/* All done */
			break;
		}

		ipoib_mcast_join(dev, mcast, 1);
		return;
	}

	priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));

	if (!ipoib_cm_admin_enabled(dev)) {
		rtnl_lock();
		dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
		rtnl_unlock();
	}

	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
}

int ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	mutex_lock(&mcast_mutex);
	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
	mutex_unlock(&mcast_mutex);

	return 0;
}

int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	mutex_lock(&mcast_mutex);
	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	cancel_delayed_work(&priv->mcast_task);
	mutex_unlock(&mcast_mutex);

	if (flush)
		flush_workqueue(ipoib_workqueue);

	return 0;
}

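/*
 * Leave a group: cancel any outstanding SA join and, if the QP was
 * attached, detach it from the group.
 */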
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		ib_sa_free_multicast(mcast->mc);

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
				mcast->mcmember.mgid.raw);

		/* Remove ourselves from the multicast group */
		ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid,
				      be16_to_cpu(mcast->mcmember.mlid));
		if (ret)
			ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
	}

	return 0;
}

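/*
 * Transmit a multicast packet.  Per the commit above, the skb's
 * neighbour is looked up with dst_neigh_lookup_skb() before priv->lock
 * is taken.  If the group has no address handle yet, the packet is
 * queued (up to IPOIB_MAX_MCAST_QUEUE) and a join is started; once an
 * AH exists the packet is sent to IB_MULTICAST_QPN.
 */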
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct dst_entry *dst = skb_dst(skb);
	struct ipoib_mcast *mcast;
	struct neighbour *n;
	unsigned long flags;

	n = NULL;
	if (dst)
		n = dst_neigh_lookup_skb(dst, skb);

	spin_lock_irqsave(&priv->lock, flags);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
	    !priv->broadcast ||
	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
				mgid);

		mcast = ipoib_mcast_alloc(dev, 0);
		if (!mcast) {
			ipoib_warn(priv, "unable to allocate memory for "
				   "multicast structure\n");
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			goto out;
		}

		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	}

	if (!mcast->ah) {
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
			skb_queue_tail(&mcast->pkt_queue, skb);
		else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			ipoib_dbg_mcast(priv, "no address vector, "
					"but multicast join already started\n");
		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			ipoib_mcast_sendonly_join(mcast);

		/*
		 * If lookup completes between here and out:, don't
		 * want to send packet twice.
		 */
		mcast = NULL;
	}

out:
	if (mcast && mcast->ah) {
		if (n) {
			if (!*to_ipoib_neigh(n)) {
				struct ipoib_neigh *neigh;

				neigh = ipoib_neigh_alloc(n, skb->dev);
				if (neigh) {
					kref_get(&mcast->ah->ref);
					neigh->ah = mcast->ah;
					list_add_tail(&neigh->list,
						      &mcast->neigh_list);
				}
			}
			neigh_release(n);
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
		return;
	}

unlock:
	if (n)
		neigh_release(n);
	spin_unlock_irqrestore(&priv->lock, flags);
}

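/*
 * Drop every multicast group attached to the device (including the
 * broadcast group), then leave and free each one outside priv->lock.
 */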
void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	unsigned long flags;

	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	}

	if (priv->broadcast) {
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		list_add_tail(&priv->broadcast->list, &remove_list);
		priv->broadcast = NULL;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}

static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
{
	/* reserved QPN, prefix, scope */
	if (memcmp(addr, broadcast, 6))
		return 0;
	/* signature lower, pkey */
	if (memcmp(addr + 7, broadcast + 7, 3))
		return 0;
	return 1;
}

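/*
 * Resync our group list with the kernel's multicast address list: add
 * entries for newly requested groups and move groups that are no
 * longer subscribed (send-only groups excepted) to a remove list that
 * is torn down outside the locks.
 */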
void ipoib_mcast_restart_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, restart_task);
	struct net_device *dev = priv->dev;
	struct netdev_hw_addr *ha;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	unsigned long flags;
	struct ib_sa_mcmember_rec rec;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	ipoib_mcast_stop_thread(dev, 0);

	local_irq_save(flags);
	netif_addr_lock(dev);
	spin_lock(&priv->lock);

	/*
	 * Unfortunately, the networking core only gives us a list of all of
	 * the multicast hardware addresses. We need to figure out which ones
	 * are new and which ones have been removed
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark all of the entries that are found or don't exist */
	netdev_for_each_mc_addr(ha, dev) {
		union ib_gid mgid;

		if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
			continue;

		memcpy(mgid.raw, ha->addr + 4, sizeof mgid);

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* ignore groups that are directly joined by userspace */
			if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
			    !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
				ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n",
						mgid.raw);
				continue;
			}

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
					mgid.raw);

			nmcast = ipoib_mcast_alloc(dev, 0);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send only entry */
				list_move_tail(&mcast->list, &remove_list);

				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries that don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
					mcast->mcmember.mgid.raw);

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_move_tail(&mcast->list, &remove_list);
		}
	}

	spin_unlock(&priv->lock);
	netif_addr_unlock(dev);
	local_irq_restore(flags);

	/* We have to cancel outside of the spinlock */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}

	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		ipoib_mcast_start_thread(dev);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
	struct ipoib_mcast_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->mgid.raw, 0, 16);

	if (ipoib_mcast_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_mcast *mcast;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->multicast_tree);

	while (n) {
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->mgid = mcast->mcmember.mgid;
			iter->created = mcast->created;
			iter->queuelen = skb_queue_len(&mcast->pkt_queue);
			iter->complete = !!mcast->ah;
			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

			ret = 0;

			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   union ib_gid *mgid,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only)
{
	*mgid = iter->mgid;
	*created = iter->created;
	*queuelen = iter->queuelen;
	*complete = iter->complete;
	*send_only = iter->send_only;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */