IPoIB: Use separate CQ for UD send completions
drivers/infiniband/ulp/ipoib/ipoib_ib.c
1 /*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
35 * $Id: ipoib_ib.c 1386 2004-12-27 16:23:17Z roland $
36 */
37
38 #include <linux/delay.h>
39 #include <linux/dma-mapping.h>
40
41 #include <rdma/ib_cache.h>
42 #include <linux/ip.h>
43 #include <linux/tcp.h>
44
45 #include "ipoib.h"
46
47 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
48 static int data_debug_level;
49
50 module_param(data_debug_level, int, 0644);
51 MODULE_PARM_DESC(data_debug_level,
52 "Enable data path debug tracing if > 0");
53 #endif
54
55 static DEFINE_MUTEX(pkey_mutex);
56
57 struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
58 struct ib_pd *pd, struct ib_ah_attr *attr)
59 {
60 struct ipoib_ah *ah;
61
62 ah = kmalloc(sizeof *ah, GFP_KERNEL);
63 if (!ah)
64 return NULL;
65
66 ah->dev = dev;
67 ah->last_send = 0;
68 kref_init(&ah->ref);
69
70 ah->ah = ib_create_ah(pd, attr);
71 if (IS_ERR(ah->ah)) {
72 kfree(ah);
73 ah = NULL;
74 } else
75 ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
76
77 return ah;
78 }
79
80 void ipoib_free_ah(struct kref *kref)
81 {
82 struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
83 struct ipoib_dev_priv *priv = netdev_priv(ah->dev);
84
85 unsigned long flags;
86
87 spin_lock_irqsave(&priv->lock, flags);
88 list_add_tail(&ah->list, &priv->dead_ahs);
89 spin_unlock_irqrestore(&priv->lock, flags);
90 }
91
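/*
 * Unmap the receive buffer(s) of one ring entry: a single buffer for
 * small MTUs, or a head buffer plus one page fragment when the 4K UD
 * MTU (scatter/gather receive) path is in use.
 */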
92 static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
93 u64 mapping[IPOIB_UD_RX_SG])
94 {
95 if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
96 ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
97 DMA_FROM_DEVICE);
98 ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
99 DMA_FROM_DEVICE);
100 } else
101 ib_dma_unmap_single(priv->ca, mapping[0],
102 IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
103 DMA_FROM_DEVICE);
104 }
105
106 static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
107 struct sk_buff *skb,
108 unsigned int length)
109 {
110 if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
111 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
112 unsigned int size;
113 /*
114 * Only two buffers are needed for max_payload = 4K;
115 * the first buffer's size is IPOIB_UD_HEAD_SIZE.
116 */
117 skb->tail += IPOIB_UD_HEAD_SIZE;
118 skb->len += length;
119
120 size = length - IPOIB_UD_HEAD_SIZE;
121
122 frag->size = size;
123 skb->data_len += size;
124 skb->truesize += size;
125 } else
126 skb_put(skb, length);
127
128 }
129
130 static int ipoib_ib_post_receive(struct net_device *dev, int id)
131 {
132 struct ipoib_dev_priv *priv = netdev_priv(dev);
133 struct ib_recv_wr *bad_wr;
134 int ret;
135
136 priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
137 priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
138 priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
139
140
141 ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
142 if (unlikely(ret)) {
143 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
144 ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
145 dev_kfree_skb_any(priv->rx_ring[id].skb);
146 priv->rx_ring[id].skb = NULL;
147 }
148
149 return ret;
150 }
151
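/*
 * Allocate and DMA-map a receive skb for ring slot @id.  With a 4K UD
 * MTU the skb carries only IPOIB_UD_HEAD_SIZE bytes of linear data and
 * the remainder of the datagram lands in an attached page fragment.
 */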
152 static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
153 {
154 struct ipoib_dev_priv *priv = netdev_priv(dev);
155 struct sk_buff *skb;
156 int buf_size;
157 u64 *mapping;
158
159 if (ipoib_ud_need_sg(priv->max_ib_mtu))
160 buf_size = IPOIB_UD_HEAD_SIZE;
161 else
162 buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
163
164 skb = dev_alloc_skb(buf_size + 4);
165 if (unlikely(!skb))
166 return NULL;
167
168 /*
169 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
170 * header. So we need 4 more bytes to get to 48 and align the
171 * IP header to a multiple of 16.
172 */
173 skb_reserve(skb, 4);
174
175 mapping = priv->rx_ring[id].mapping;
176 mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
177 DMA_FROM_DEVICE);
178 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
179 goto error;
180
181 if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
182 struct page *page = alloc_page(GFP_ATOMIC);
183 if (!page)
184 goto partial_error;
185 skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
186 mapping[1] =
187 ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
188 0, PAGE_SIZE, DMA_FROM_DEVICE);
189 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
190 goto partial_error;
191 }
192
193 priv->rx_ring[id].skb = skb;
194 return skb;
195
196 partial_error:
197 ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
198 error:
199 dev_kfree_skb_any(skb);
200 return NULL;
201 }
202
203 static int ipoib_ib_post_receives(struct net_device *dev)
204 {
205 struct ipoib_dev_priv *priv = netdev_priv(dev);
206 int i;
207
208 for (i = 0; i < ipoib_recvq_size; ++i) {
209 if (!ipoib_alloc_rx_skb(dev, i)) {
210 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
211 return -ENOMEM;
212 }
213 if (ipoib_ib_post_receive(dev, i)) {
214 ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
215 return -EIO;
216 }
217 }
218
219 return 0;
220 }
221
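/*
 * Handle one UD receive completion: drop copies of our own multicast
 * sends, repost a fresh buffer, and pass the received skb up the stack.
 */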
222 static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
223 {
224 struct ipoib_dev_priv *priv = netdev_priv(dev);
225 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
226 struct sk_buff *skb;
227 u64 mapping[IPOIB_UD_RX_SG];
228
229 ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
230 wr_id, wc->status);
231
232 if (unlikely(wr_id >= ipoib_recvq_size)) {
233 ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
234 wr_id, ipoib_recvq_size);
235 return;
236 }
237
238 skb = priv->rx_ring[wr_id].skb;
239
240 if (unlikely(wc->status != IB_WC_SUCCESS)) {
241 if (wc->status != IB_WC_WR_FLUSH_ERR)
242 ipoib_warn(priv, "failed recv event "
243 "(status=%d, wrid=%d vend_err %x)\n",
244 wc->status, wr_id, wc->vendor_err);
245 ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
246 dev_kfree_skb_any(skb);
247 priv->rx_ring[wr_id].skb = NULL;
248 return;
249 }
250
251 /*
252 * Drop packets that this interface sent, i.e. multicast packets
253 * that the HCA has replicated.
254 */
255 if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
256 goto repost;
257
258 memcpy(mapping, priv->rx_ring[wr_id].mapping,
259 IPOIB_UD_RX_SG * sizeof *mapping);
260
261 /*
262 * If we can't allocate a new RX buffer, dump
263 * this packet and reuse the old buffer.
264 */
265 if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
266 ++dev->stats.rx_dropped;
267 goto repost;
268 }
269
270 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
271 wc->byte_len, wc->slid);
272
273 ipoib_ud_dma_unmap_rx(priv, mapping);
274 ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
275
276 skb_pull(skb, IB_GRH_BYTES);
277
278 skb->protocol = ((struct ipoib_header *) skb->data)->proto;
279 skb_reset_mac_header(skb);
280 skb_pull(skb, IPOIB_ENCAP_LEN);
281
282 dev->last_rx = jiffies;
283 ++dev->stats.rx_packets;
284 dev->stats.rx_bytes += skb->len;
285
286 skb->dev = dev;
287 /* XXX get correct PACKET_ type here */
288 skb->pkt_type = PACKET_HOST;
289
290 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
291 skb->ip_summed = CHECKSUM_UNNECESSARY;
292
293 netif_receive_skb(skb);
294
295 repost:
296 if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
297 ipoib_warn(priv, "ipoib_ib_post_receive failed "
298 "for buf %d\n", wr_id);
299 }
300
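/*
 * DMA-map the linear part and each page fragment of a send skb,
 * recording the addresses in tx_req->mapping.  On a partial failure
 * everything mapped so far is unmapped again.
 */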
301 static int ipoib_dma_map_tx(struct ib_device *ca,
302 struct ipoib_tx_buf *tx_req)
303 {
304 struct sk_buff *skb = tx_req->skb;
305 u64 *mapping = tx_req->mapping;
306 int i;
307 int off;
308
309 if (skb_headlen(skb)) {
310 mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
311 DMA_TO_DEVICE);
312 if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
313 return -EIO;
314
315 off = 1;
316 } else
317 off = 0;
318
319 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
320 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
321 mapping[i + off] = ib_dma_map_page(ca, frag->page,
322 frag->page_offset, frag->size,
323 DMA_TO_DEVICE);
324 if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
325 goto partial_error;
326 }
327 return 0;
328
329 partial_error:
330 for (; i > 0; --i) {
331 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
332 ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE);
333 }
334
335 if (off)
336 ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
337
338 return -EIO;
339 }
340
341 static void ipoib_dma_unmap_tx(struct ib_device *ca,
342 struct ipoib_tx_buf *tx_req)
343 {
344 struct sk_buff *skb = tx_req->skb;
345 u64 *mapping = tx_req->mapping;
346 int i;
347 int off;
348
349 if (skb_headlen(skb)) {
350 ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
351 off = 1;
352 } else
353 off = 0;
354
355 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
356 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
357 ib_dma_unmap_page(ca, mapping[i + off], frag->size,
358 DMA_TO_DEVICE);
359 }
360 }
361
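/*
 * Handle one UD send completion: unmap and free the skb, update
 * statistics, and wake the netdev queue if it was stopped because
 * the TX ring had filled.
 */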
362 static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
363 {
364 struct ipoib_dev_priv *priv = netdev_priv(dev);
365 unsigned int wr_id = wc->wr_id;
366 struct ipoib_tx_buf *tx_req;
367
368 ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
369 wr_id, wc->status);
370
371 if (unlikely(wr_id >= ipoib_sendq_size)) {
372 ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
373 wr_id, ipoib_sendq_size);
374 return;
375 }
376
377 tx_req = &priv->tx_ring[wr_id];
378
379 ipoib_dma_unmap_tx(priv->ca, tx_req);
380
381 ++dev->stats.tx_packets;
382 dev->stats.tx_bytes += tx_req->skb->len;
383
384 dev_kfree_skb_any(tx_req->skb);
385
386 ++priv->tx_tail;
387 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
388 netif_queue_stopped(dev) &&
389 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
390 netif_wake_queue(dev);
391
392 if (wc->status != IB_WC_SUCCESS &&
393 wc->status != IB_WC_WR_FLUSH_ERR)
394 ipoib_warn(priv, "failed send event "
395 "(status=%d, wrid=%d vend_err %x)\n",
396 wc->status, wr_id, wc->vendor_err);
397 }
398
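/*
 * Reap up to MAX_SEND_CQE completions from the dedicated UD send CQ;
 * returns nonzero if a full batch was polled and more may be pending.
 */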
399 static int poll_tx(struct ipoib_dev_priv *priv)
400 {
401 int n, i;
402
403 n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
404 for (i = 0; i < n; ++i)
405 ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
406
407 return n == MAX_SEND_CQE;
408 }
409
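/*
 * NAPI poll handler: drain the receive CQ in batches of IPOIB_NUM_WC,
 * dispatching each completion to the UD or connected-mode handler, and
 * re-arm the CQ once fewer than @budget completions were found.
 */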
410 int ipoib_poll(struct napi_struct *napi, int budget)
411 {
412 struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
413 struct net_device *dev = priv->dev;
414 int done;
415 int t;
416 int n, i;
417
418 done = 0;
419
420 poll_more:
421 while (done < budget) {
422 int max = (budget - done);
423
424 t = min(IPOIB_NUM_WC, max);
425 n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
426
427 for (i = 0; i < n; i++) {
428 struct ib_wc *wc = priv->ibwc + i;
429
430 if (wc->wr_id & IPOIB_OP_RECV) {
431 ++done;
432 if (wc->wr_id & IPOIB_OP_CM)
433 ipoib_cm_handle_rx_wc(dev, wc);
434 else
435 ipoib_ib_handle_rx_wc(dev, wc);
436 } else
437 ipoib_cm_handle_tx_wc(priv->dev, wc);
438 }
439
440 if (n != t)
441 break;
442 }
443
444 if (done < budget) {
445 netif_rx_complete(dev, napi);
446 if (unlikely(ib_req_notify_cq(priv->recv_cq,
447 IB_CQ_NEXT_COMP |
448 IB_CQ_REPORT_MISSED_EVENTS)) &&
449 netif_rx_reschedule(dev, napi))
450 goto poll_more;
451 }
452
453 return done;
454 }
455
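/* Receive CQ event handler: hand the work over to NAPI. */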
456 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
457 {
458 struct net_device *dev = dev_ptr;
459 struct ipoib_dev_priv *priv = netdev_priv(dev);
460
461 netif_rx_schedule(dev, &priv->napi);
462 }
463
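/*
 * Build the gather list from the already-mapped skb and post a UD send
 * work request; when a GSO header is supplied the LSO opcode is used.
 */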
464 static inline int post_send(struct ipoib_dev_priv *priv,
465 unsigned int wr_id,
466 struct ib_ah *address, u32 qpn,
467 struct ipoib_tx_buf *tx_req,
468 void *head, int hlen)
469 {
470 struct ib_send_wr *bad_wr;
471 int i, off;
472 struct sk_buff *skb = tx_req->skb;
473 skb_frag_t *frags = skb_shinfo(skb)->frags;
474 int nr_frags = skb_shinfo(skb)->nr_frags;
475 u64 *mapping = tx_req->mapping;
476
477 if (skb_headlen(skb)) {
478 priv->tx_sge[0].addr = mapping[0];
479 priv->tx_sge[0].length = skb_headlen(skb);
480 off = 1;
481 } else
482 off = 0;
483
484 for (i = 0; i < nr_frags; ++i) {
485 priv->tx_sge[i + off].addr = mapping[i + off];
486 priv->tx_sge[i + off].length = frags[i].size;
487 }
488 priv->tx_wr.num_sge = nr_frags + off;
489 priv->tx_wr.wr_id = wr_id;
490 priv->tx_wr.wr.ud.remote_qpn = qpn;
491 priv->tx_wr.wr.ud.ah = address;
492
493 if (head) {
494 priv->tx_wr.wr.ud.mss = skb_shinfo(skb)->gso_size;
495 priv->tx_wr.wr.ud.header = head;
496 priv->tx_wr.wr.ud.hlen = hlen;
497 priv->tx_wr.opcode = IB_WR_LSO;
498 } else
499 priv->tx_wr.opcode = IB_WR_SEND;
500
501 return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
502 }
503
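/*
 * Queue one skb for transmission on the UD QP.  GSO skbs have their
 * TCP/IP headers pulled out and passed separately so the HCA can
 * perform LSO; oversized non-GSO skbs are dropped.
 */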
504 void ipoib_send(struct net_device *dev, struct sk_buff *skb,
505 struct ipoib_ah *address, u32 qpn)
506 {
507 struct ipoib_dev_priv *priv = netdev_priv(dev);
508 struct ipoib_tx_buf *tx_req;
509 int hlen;
510 void *phead;
511
512 if (skb_is_gso(skb)) {
513 hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
514 phead = skb->data;
515 if (unlikely(!skb_pull(skb, hlen))) {
516 ipoib_warn(priv, "linear data too small\n");
517 ++dev->stats.tx_dropped;
518 ++dev->stats.tx_errors;
519 dev_kfree_skb_any(skb);
520 return;
521 }
522 } else {
523 if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
524 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
525 skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
526 ++dev->stats.tx_dropped;
527 ++dev->stats.tx_errors;
528 ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
529 return;
530 }
531 phead = NULL;
532 hlen = 0;
533 }
534
535 ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
536 skb->len, address, qpn);
537
538 /*
539 * We put the skb into the tx_ring _before_ we call post_send()
540 * because it's entirely possible that the completion handler will
541 * run before we execute anything after the post_send(). That
542 * means we have to make sure everything is properly recorded and
543 * our state is consistent before we call post_send().
544 */
545 tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
546 tx_req->skb = skb;
547 if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
548 ++dev->stats.tx_errors;
549 dev_kfree_skb_any(skb);
550 return;
551 }
552
553 if (skb->ip_summed == CHECKSUM_PARTIAL)
554 priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
555 else
556 priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
557
558 if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
559 address->ah, qpn, tx_req, phead, hlen))) {
560 ipoib_warn(priv, "post_send failed\n");
561 ++dev->stats.tx_errors;
562 ipoib_dma_unmap_tx(priv->ca, tx_req);
563 dev_kfree_skb_any(skb);
564 } else {
565 dev->trans_start = jiffies;
566
567 address->last_send = priv->tx_head;
568 ++priv->tx_head;
569 skb_orphan(skb);
570
571 if (++priv->tx_outstanding == ipoib_sendq_size) {
572 ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
573 netif_stop_queue(dev);
574 }
575 }
576
577 if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
578 poll_tx(priv);
579 }
580
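/*
 * Destroy any dead address handles whose last posted send has already
 * completed (tx_tail has caught up with ah->last_send).
 */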
581 static void __ipoib_reap_ah(struct net_device *dev)
582 {
583 struct ipoib_dev_priv *priv = netdev_priv(dev);
584 struct ipoib_ah *ah, *tah;
585 LIST_HEAD(remove_list);
586
587 spin_lock_irq(&priv->tx_lock);
588 spin_lock(&priv->lock);
589 list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
590 if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
591 list_del(&ah->list);
592 ib_destroy_ah(ah->ah);
593 kfree(ah);
594 }
595 spin_unlock(&priv->lock);
596 spin_unlock_irq(&priv->tx_lock);
597 }
598
599 void ipoib_reap_ah(struct work_struct *work)
600 {
601 struct ipoib_dev_priv *priv =
602 container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
603 struct net_device *dev = priv->dev;
604
605 __ipoib_reap_ah(dev);
606
607 if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
608 queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
609 round_jiffies_relative(HZ));
610 }
611
612 int ipoib_ib_dev_open(struct net_device *dev)
613 {
614 struct ipoib_dev_priv *priv = netdev_priv(dev);
615 int ret;
616
617 if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
618 ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
619 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
620 return -1;
621 }
622 set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
623
624 ret = ipoib_init_qp(dev);
625 if (ret) {
626 ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
627 return -1;
628 }
629
630 ret = ipoib_ib_post_receives(dev);
631 if (ret) {
632 ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
633 ipoib_ib_dev_stop(dev, 1);
634 return -1;
635 }
636
637 ret = ipoib_cm_dev_open(dev);
638 if (ret) {
639 ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
640 ipoib_ib_dev_stop(dev, 1);
641 return -1;
642 }
643
644 clear_bit(IPOIB_STOP_REAPER, &priv->flags);
645 queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
646 round_jiffies_relative(HZ));
647
648 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
649
650 return 0;
651 }
652
653 static void ipoib_pkey_dev_check_presence(struct net_device *dev)
654 {
655 struct ipoib_dev_priv *priv = netdev_priv(dev);
656 u16 pkey_index = 0;
657
658 if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
659 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
660 else
661 set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
662 }
663
664 int ipoib_ib_dev_up(struct net_device *dev)
665 {
666 struct ipoib_dev_priv *priv = netdev_priv(dev);
667
668 ipoib_pkey_dev_check_presence(dev);
669
670 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
671 ipoib_dbg(priv, "PKEY is not assigned.\n");
672 return 0;
673 }
674
675 set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
676
677 return ipoib_mcast_start_thread(dev);
678 }
679
680 int ipoib_ib_dev_down(struct net_device *dev, int flush)
681 {
682 struct ipoib_dev_priv *priv = netdev_priv(dev);
683
684 ipoib_dbg(priv, "downing ib_dev\n");
685
686 clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
687 netif_carrier_off(dev);
688
689 /* Shut down the P_Key thread if still active */
690 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
691 mutex_lock(&pkey_mutex);
692 set_bit(IPOIB_PKEY_STOP, &priv->flags);
693 cancel_delayed_work(&priv->pkey_poll_task);
694 mutex_unlock(&pkey_mutex);
695 if (flush)
696 flush_workqueue(ipoib_workqueue);
697 }
698
699 ipoib_mcast_stop_thread(dev, flush);
700 ipoib_mcast_dev_flush(dev);
701
702 ipoib_flush_paths(dev);
703
704 return 0;
705 }
706
707 static int recvs_pending(struct net_device *dev)
708 {
709 struct ipoib_dev_priv *priv = netdev_priv(dev);
710 int pending = 0;
711 int i;
712
713 for (i = 0; i < ipoib_recvq_size; ++i)
714 if (priv->rx_ring[i].skb)
715 ++pending;
716
717 return pending;
718 }
719
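/*
 * Poll both CQs until they are empty so that no completions are left
 * behind while the device is being brought down.
 */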
720 void ipoib_drain_cq(struct net_device *dev)
721 {
722 struct ipoib_dev_priv *priv = netdev_priv(dev);
723 int i, n;
724 do {
725 n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
726 for (i = 0; i < n; ++i) {
727 /*
728 * Convert any successful completions to flush
729 * errors to avoid passing packets up the
730 * stack after bringing the device down.
731 */
732 if (priv->ibwc[i].status == IB_WC_SUCCESS)
733 priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;
734
735 if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
736 if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
737 ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
738 else
739 ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
740 } else
741 ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
742 }
743 } while (n == IPOIB_NUM_WC);
744
745 while (poll_tx(priv))
746 ; /* nothing */
747 }
748
749 int ipoib_ib_dev_stop(struct net_device *dev, int flush)
750 {
751 struct ipoib_dev_priv *priv = netdev_priv(dev);
752 struct ib_qp_attr qp_attr;
753 unsigned long begin;
754 struct ipoib_tx_buf *tx_req;
755 int i;
756
757 clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
758
759 ipoib_cm_dev_stop(dev);
760
761 /*
762 * Move our QP to the error state and then reinitialize it
763 * once all work requests have completed or have been flushed.
764 */
765 qp_attr.qp_state = IB_QPS_ERR;
766 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
767 ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
768
769 /* Wait for all sends and receives to complete */
770 begin = jiffies;
771
772 while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
773 if (time_after(jiffies, begin + 5 * HZ)) {
774 ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
775 priv->tx_head - priv->tx_tail, recvs_pending(dev));
776
777 /*
778 * assume the HW is wedged and just free up
779 * all our pending work requests.
780 */
781 while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
782 tx_req = &priv->tx_ring[priv->tx_tail &
783 (ipoib_sendq_size - 1)];
784 ipoib_dma_unmap_tx(priv->ca, tx_req);
785 dev_kfree_skb_any(tx_req->skb);
786 ++priv->tx_tail;
787 --priv->tx_outstanding;
788 }
789
790 for (i = 0; i < ipoib_recvq_size; ++i) {
791 struct ipoib_rx_buf *rx_req;
792
793 rx_req = &priv->rx_ring[i];
794 if (!rx_req->skb)
795 continue;
796 ipoib_ud_dma_unmap_rx(priv,
797 priv->rx_ring[i].mapping);
798 dev_kfree_skb_any(rx_req->skb);
799 rx_req->skb = NULL;
800 }
801
802 goto timeout;
803 }
804
805 ipoib_drain_cq(dev);
806
807 msleep(1);
808 }
809
810 ipoib_dbg(priv, "All sends and receives done.\n");
811
812 timeout:
813 qp_attr.qp_state = IB_QPS_RESET;
814 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
815 ipoib_warn(priv, "Failed to modify QP to RESET state\n");
816
817 /* Wait for all AHs to be reaped */
818 set_bit(IPOIB_STOP_REAPER, &priv->flags);
819 cancel_delayed_work(&priv->ah_reap_task);
820 if (flush)
821 flush_workqueue(ipoib_workqueue);
822
823 begin = jiffies;
824
825 while (!list_empty(&priv->dead_ahs)) {
826 __ipoib_reap_ah(dev);
827
828 if (time_after(jiffies, begin + HZ)) {
829 ipoib_warn(priv, "timing out; will leak address handles\n");
830 break;
831 }
832
833 msleep(1);
834 }
835
836 ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
837
838 return 0;
839 }
840
841 int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
842 {
843 struct ipoib_dev_priv *priv = netdev_priv(dev);
844
845 priv->ca = ca;
846 priv->port = port;
847 priv->qp = NULL;
848
849 if (ipoib_transport_dev_init(dev, ca)) {
850 printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
851 return -ENODEV;
852 }
853
854 if (dev->flags & IFF_UP) {
855 if (ipoib_ib_dev_open(dev)) {
856 ipoib_transport_dev_cleanup(dev);
857 return -ENODEV;
858 }
859 }
860
861 return 0;
862 }
863
864 static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, int pkey_event)
865 {
866 struct ipoib_dev_priv *cpriv;
867 struct net_device *dev = priv->dev;
868 u16 new_index;
869
870 mutex_lock(&priv->vlan_mutex);
871
872 /*
873 * Flush any child interfaces too -- they might be up even if
874 * the parent is down.
875 */
876 list_for_each_entry(cpriv, &priv->child_intfs, list)
877 __ipoib_ib_dev_flush(cpriv, pkey_event);
878
879 mutex_unlock(&priv->vlan_mutex);
880
881 if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
882 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
883 return;
884 }
885
886 if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
887 ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
888 return;
889 }
890
891 if (pkey_event) {
892 if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
893 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
894 ipoib_ib_dev_down(dev, 0);
895 ipoib_ib_dev_stop(dev, 0);
896 if (ipoib_pkey_dev_delay_open(dev))
897 return;
898 }
899
900 /* restart QP only if P_Key index is changed */
901 if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
902 new_index == priv->pkey_index) {
903 ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
904 return;
905 }
906 priv->pkey_index = new_index;
907 }
908
909 ipoib_dbg(priv, "flushing\n");
910
911 ipoib_ib_dev_down(dev, 0);
912
913 if (pkey_event) {
914 ipoib_ib_dev_stop(dev, 0);
915 ipoib_ib_dev_open(dev);
916 }
917
918 /*
919 * The device could have been brought down between the start and when
920 * we get here; don't bring it back up if it's not configured up.
921 */
922 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
923 ipoib_ib_dev_up(dev);
924 ipoib_mcast_restart_task(&priv->restart_task);
925 }
926 }
927
928 void ipoib_ib_dev_flush(struct work_struct *work)
929 {
930 struct ipoib_dev_priv *priv =
931 container_of(work, struct ipoib_dev_priv, flush_task);
932
933 ipoib_dbg(priv, "Flushing %s\n", priv->dev->name);
934 __ipoib_ib_dev_flush(priv, 0);
935 }
936
937 void ipoib_pkey_event(struct work_struct *work)
938 {
939 struct ipoib_dev_priv *priv =
940 container_of(work, struct ipoib_dev_priv, pkey_event_task);
941
942 ipoib_dbg(priv, "Flushing %s and restarting its QP\n", priv->dev->name);
943 __ipoib_ib_dev_flush(priv, 1);
944 }
945
946 void ipoib_ib_dev_cleanup(struct net_device *dev)
947 {
948 struct ipoib_dev_priv *priv = netdev_priv(dev);
949
950 ipoib_dbg(priv, "cleaning up ib_dev\n");
951
952 ipoib_mcast_stop_thread(dev, 1);
953 ipoib_mcast_dev_flush(dev);
954
955 ipoib_transport_dev_cleanup(dev);
956 }
957
958 /*
959 * Delayed P_Key Assignment Interim Support
960 *
961 * The following is an initial implementation of the delayed P_Key assignment
962 * mechanism. It uses the same approach implemented for the multicast
963 * group join. The single goal of this implementation is to quickly address
964 * Bug #2507. This implementation will probably be removed when the P_Key
965 * change async notification is available.
966 */
967
968 void ipoib_pkey_poll(struct work_struct *work)
969 {
970 struct ipoib_dev_priv *priv =
971 container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
972 struct net_device *dev = priv->dev;
973
974 ipoib_pkey_dev_check_presence(dev);
975
976 if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
977 ipoib_open(dev);
978 else {
979 mutex_lock(&pkey_mutex);
980 if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
981 queue_delayed_work(ipoib_workqueue,
982 &priv->pkey_poll_task,
983 HZ);
984 mutex_unlock(&pkey_mutex);
985 }
986 }
987
988 int ipoib_pkey_dev_delay_open(struct net_device *dev)
989 {
990 struct ipoib_dev_priv *priv = netdev_priv(dev);
991
992 /* Look for the interface pkey value in the IB Port P_Key table and */
993 /* set the interface pkey assignment flag */
994 ipoib_pkey_dev_check_presence(dev);
995
996 /* P_Key value not assigned yet - start polling */
997 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
998 mutex_lock(&pkey_mutex);
999 clear_bit(IPOIB_PKEY_STOP, &priv->flags);
1000 queue_delayed_work(ipoib_workqueue,
1001 &priv->pkey_poll_task,
1002 HZ);
1003 mutex_unlock(&pkey_mutex);
1004 return 1;
1005 }
1006
1007 return 0;
1008 }