IPoIB: Consolidate private neighbour data handling
drivers/infiniband/ulp/ipoib/ipoib_main.c
/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
 */

#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif

struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one
};

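/*
 * The ib_client above hooks IPoIB into the IB core: ipoib_add_one() is
 * called for each HCA as it is registered and creates one "ib%d"
 * net_device per physical port, and ipoib_remove_one() tears those
 * devices down again when the HCA goes away.
 */
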
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;

	if (ipoib_ib_dev_open(dev))
		return -EINVAL;

	if (ipoib_ib_dev_up(dev)) {
		ipoib_ib_dev_stop(dev);
		return -EINVAL;
	}

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;
}

static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	/*
	 * Now flush workqueue to make sure a scheduled task doesn't
	 * bring our internal state back up.
	 */
	flush_workqueue(ipoib_workqueue);

	ipoib_ib_dev_down(dev, 1);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		mutex_lock(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		mutex_unlock(&priv->vlan_mutex);
	}

	return 0;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}

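/*
 * Path records learned from the SA are cached per device: priv->path_tree
 * is an rb-tree keyed by the 16-byte destination GID for lookups from the
 * transmit path, and priv->path_list is a plain list used when the whole
 * cache has to be flushed.  Both structures are protected by priv->lock.
 */
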
static struct ipoib_path *__path_find(struct net_device *dev,
				      union ib_gid *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid->raw, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that path->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);

		ipoib_neigh_free(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}

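/*
 * The iterator below is used by the debugfs code: it walks the path
 * rb-tree in GID order, copying one entry at a time under priv->lock so
 * the caller never has to hold the lock while formatting its output.
 */
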
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_splice(&priv->path_list, &remove_list);
	INIT_LIST_HEAD(&priv->path_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		wait_for_completion(&path->done);
		path_free(dev, path);
	}
}

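/*
 * path_rec_completion() runs from the SA query callback.  On success it
 * turns the returned path record into an address handle, hands that AH to
 * the path and to every neighbour waiting on it, and then re-submits any
 * queued skbs through dev_queue_xmit() after dropping priv->lock.
 */
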
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_neigh *neigh;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (pathrec)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
			  be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
	else
		ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
			  status, IPOIB_GID_ARG(path->pathrec.dgid));

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av = {
			.dlid     = be16_to_cpu(pathrec->dlid),
			.sl       = pathrec->sl,
			.port_num = priv->port
		};
		int path_rate = ib_sa_rate_enum_to_int(pathrec->rate);

		if (path_rate > 0 && priv->local_rate > path_rate)
			av.static_rate = (priv->local_rate - 1) / path_rate;

		ipoib_dbg(priv, "static_rate %d for local port %dX, path %dX\n",
			  av.static_rate, priv->local_rate,
			  ib_sa_rate_enum_to_int(pathrec->rate));

		ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	path->ah = ah;

	if (ah) {
		path->pathrec = *pathrec;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry(neigh, &path->neigh_list, list) {
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}

static struct ipoib_path *path_rec_create(struct net_device *dev,
					  union ib_gid *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
	path->pathrec.sgid      = priv->local_gid;
	path->pathrec.pkey      = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path = 1;

	return path;
}

static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
		  IPOIB_GID_ARG(path->pathrec.dgid));

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID |
				   IB_SA_PATH_REC_SGID |
				   IB_SA_PATH_REC_NUMB_PATH |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
		path->query = NULL;
		return path->query_id;
	}

	return 0;
}

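/*
 * The transmit helpers below parse the 20-byte IPoIB hardware address:
 * the remote QPN sits in the first four bytes (read with be32_to_cpup())
 * and the 16-byte port GID follows at offset 4.  A GID whose first byte
 * is 0xff is a multicast GID, which is why ha[4] is tested against 0xff
 * to choose between the unicast and multicast send paths.
 */
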
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;

	neigh = ipoib_neigh_alloc(skb->dst->neighbour);
	if (!neigh) {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	skb_queue_head_init(&neigh->queue);

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4));
	if (!path) {
		path = path_rec_create(dev,
				       (union ib_gid *) (skb->dst->neighbour->ha + 4));
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		ipoib_send(dev, skb, path->ah,
			   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
	} else {
		neigh->ah = NULL;
		__skb_queue_tail(&neigh->queue, skb);

		if (!path->query && path_rec_start(dev, path))
			goto err_list;
	}

	spin_unlock(&priv->lock);
	return;

err_list:
	list_del(&neigh->list);

err_path:
	ipoib_neigh_free(neigh);
	++priv->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock(&priv->lock);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

	/* Look up path record for unicasts */
	if (skb->dst->neighbour->ha[4] != 0xff) {
		neigh_add_path(skb, dev);
		return;
	}

	/* Add in the P_Key for multicasts */
	skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
	skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
	ipoib_mcast_send(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4), skb);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudoheader *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	path = __path_find(dev, (union ib_gid *) (phdr->hwaddr + 4));
	if (!path) {
		path = path_rec_create(dev,
				       (union ib_gid *) (phdr->hwaddr + 4));
		if (path) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof *phdr);
			__skb_queue_tail(&path->queue, skb);

			if (path_rec_start(dev, path)) {
				spin_unlock(&priv->lock);
				path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock(&priv->lock);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		ipoib_send(dev, skb, path->ah,
			   be32_to_cpup((__be32 *) phdr->hwaddr));
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof *phdr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock(&priv->lock);
}

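/*
 * ipoib_start_xmit() handles two cases: skbs that carry a neighbour (the
 * normal case, using the ipoib_neigh state cached on the core neighbour
 * entry) and skbs without one, for which ipoib_hard_header() pushed an
 * ipoib_pseudoheader holding the destination hardware address.
 */
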
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	unsigned long flags;

	if (!spin_trylock_irqsave(&priv->tx_lock, flags))
		return NETDEV_TX_LOCKED;

	/*
	 * Check if our queue is stopped.  Since we have the LLTX bit
	 * set, we can't rely on netif_stop_queue() preventing our
	 * xmit function from being called with a full queue.
	 */
	if (unlikely(netif_queue_stopped(dev))) {
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	if (skb->dst && skb->dst->neighbour) {
		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
			ipoib_path_lookup(skb, dev);
			goto out;
		}

		neigh = *to_ipoib_neigh(skb->dst->neighbour);

		if (likely(neigh->ah)) {
			ipoib_send(dev, skb, neigh->ah,
				   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
			goto out;
		}

		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			spin_lock(&priv->lock);
			__skb_queue_tail(&neigh->queue, skb);
			spin_unlock(&priv->lock);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	} else {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb->data;
		skb_pull(skb, sizeof *phdr);

		if (phdr->hwaddr[4] == 0xff) {
			/* Add in the P_Key for multicast */
			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
			phdr->hwaddr[9] = priv->pkey & 0xff;

			ipoib_mcast_send(dev, (union ib_gid *) (phdr->hwaddr + 4), skb);
		} else {
			/* unicast GID -- should be ARP or RARP reply */

			if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
			    (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
				ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
					   IPOIB_GID_FMT "\n",
					   skb->dst ? "neigh" : "dst",
					   be16_to_cpup((__be16 *) skb->data),
					   be32_to_cpup((__be32 *) phdr->hwaddr),
					   IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4)));
				dev_kfree_skb_any(skb);
				++priv->stats.tx_dropped;
				goto out;
			}

			unicast_arp_send(skb, dev, phdr);
		}
	}

out:
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return NETDEV_TX_OK;
}

static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	return &priv->stats;
}

static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev->trans_start));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}

static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     void *daddr, void *saddr, unsigned len)
{
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * If we don't have a neighbour structure, stuff the
	 * destination address onto the front of the skb so we can
	 * figure out where to send the packet later.
	 */
	if ((!skb->dst || !skb->dst->neighbour) && daddr) {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
		memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
	}

	return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(ipoib_workqueue, &priv->restart_task);
}

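/*
 * Private per-neighbour state lives in a struct ipoib_neigh hung off the
 * core neighbour entry via to_ipoib_neigh().  ipoib_neigh_alloc() and
 * ipoib_neigh_free() below are the single place that attaches and detaches
 * it; the destructor drops any cached address handle outside priv->lock.
 */
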
static void ipoib_neigh_destructor(struct neighbour *n)
{
	struct ipoib_neigh *neigh;
	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
	unsigned long flags;
	struct ipoib_ah *ah = NULL;

	ipoib_dbg(priv,
		  "neigh_destructor for %06x " IPOIB_GID_FMT "\n",
		  be32_to_cpup((__be32 *) n->ha),
		  IPOIB_GID_ARG(*((union ib_gid *) (n->ha + 4))));

	spin_lock_irqsave(&priv->lock, flags);

	neigh = *to_ipoib_neigh(n);
	if (neigh) {
		if (neigh->ah)
			ah = neigh->ah;
		list_del(&neigh->list);
		ipoib_neigh_free(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ah)
		ipoib_put_ah(ah);
}

struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour)
{
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->neighbour = neighbour;
	*to_ipoib_neigh(neighbour) = neigh;

	return neigh;
}

void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	*to_ipoib_neigh(neigh->neighbour) = NULL;
	kfree(neigh);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_destructor = ipoib_neigh_destructor;

	return 0;
}

int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */

	priv->rx_ring = kzalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, IPOIB_RX_RING_SIZE);
		goto out;
	}

	priv->tx_ring = kzalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
				GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, IPOIB_TX_RING_SIZE);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head & tx_tail are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	kfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		unregister_netdev(cpriv->dev);
		ipoib_dev_cleanup(cpriv->dev);
		free_netdev(cpriv->dev);
	}

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	kfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}

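/*
 * ipoib_setup() is the alloc_netdev() callback used by ipoib_intf_alloc():
 * it fills in the net_device operations, the IPoIB link parameters
 * (address length, MTU, ARPHRD_INFINIBAND type) and initializes the locks,
 * lists and work items in the private data.
 */
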
static void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->open                = ipoib_open;
	dev->stop                = ipoib_stop;
	dev->change_mtu          = ipoib_change_mtu;
	dev->hard_start_xmit     = ipoib_start_xmit;
	dev->get_stats           = ipoib_get_stats;
	dev->tx_timeout          = ipoib_timeout;
	dev->hard_header         = ipoib_hard_header;
	dev->set_multicast_list  = ipoib_set_mcast_list;
	dev->neigh_setup         = ipoib_neigh_setup_dev;

	dev->watchdog_timeo      = HZ;

	dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;

	/*
	 * We add in INFINIBAND_ALEN to allow for the destination
	 * address "pseudoheader" for skbs without neighbour struct.
	 */
	dev->hard_header_len     = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len            = INFINIBAND_ALEN;
	dev->type                = ARPHRD_INFINIBAND;
	dev->tx_queue_len        = IPOIB_TX_RING_SIZE * 2;
	dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

	/* MTU will be reset when mcast join happens */
	dev->mtu                 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
	priv->mcast_mtu          = priv->admin_mtu = dev->mtu;

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	SET_MODULE_OWNER(dev);

	priv->dev = dev;

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	mutex_init(&priv->mcast_mutex);
	mutex_init(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_WORK(&priv->pkey_task,    ipoib_pkey_poll,          priv->dev);
	INIT_WORK(&priv->mcast_task,   ipoib_mcast_join_task,    priv->dev);
	INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush,       priv->dev);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
	INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah,            priv->dev);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
			   ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct ipoib_dev_priv *priv =
		netdev_priv(container_of(cdev, struct net_device, class_dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t create_child(struct class_device *cdev,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev),
			     pkey);

	return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct class_device *cdev,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(container_of(cdev, struct net_device, class_dev),
				pkey);

	return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
	return class_device_create_file(&dev->class_dev,
					&class_device_attr_pkey);
}

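/*
 * The create_child/delete_child attributes are write-only sysfs files on
 * the network class device; writing a P_Key, e.g.
 * "echo 0x8001 > /sys/class/net/ib0/create_child" (assuming the parent
 * interface is ib0), adds or removes a child interface on that partition.
 * The full membership bit is forced on before the child is created.
 */
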
static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto alloc_mem_failed;
	}

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto alloc_mem_failed;
	} else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (class_device_create_file(&priv->dev->class_dev,
				     &class_device_attr_create_child))
		goto sysfs_failed;
	if (class_device_create_file(&priv->dev->class_dev,
				     &class_device_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_scheduled_work();

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}

static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list;

	dev_list = ib_get_client_data(device, &ipoib_client);

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);
		flush_scheduled_work();

		unregister_netdev(priv->dev);
		ipoib_dev_cleanup(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
	int ret;

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create our own workqueue mainly because we want to be
	 * able to flush it when devices are being removed.  We can't
	 * use schedule_work()/flush_scheduled_work() because both
	 * unregister_netdev() and linkwatch_event take the rtnl lock,
	 * so flush_scheduled_work() can deadlock during device
	 * removal.
	 */
	ipoib_workqueue = create_singlethread_workqueue("ipoib");
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_wq;

	return 0;

err_wq:
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}

static void __exit ipoib_cleanup_module(void)
{
	ib_unregister_client(&ipoib_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);