/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
                 "Enable data path debug tracing if > 0");
#endif

static DEFINE_MUTEX(pkey_mutex);
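
/*
 * data_debug_level gates the ipoib_dbg_data() tracing used in the RX/TX hot
 * paths below; pkey_mutex serializes the delayed P_Key polling machinery at
 * the bottom of this file (ipoib_pkey_poll() and ipoib_pkey_dev_delay_open()).
 */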

struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
                                 struct ib_pd *pd, struct ib_ah_attr *attr)
{
        struct ipoib_ah *ah;
        struct ib_ah *vah;

        ah = kmalloc(sizeof *ah, GFP_KERNEL);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        ah->dev       = dev;
        ah->last_send = 0;
        kref_init(&ah->ref);

        vah = ib_create_ah(pd, attr);
        if (IS_ERR(vah)) {
                kfree(ah);
                ah = (struct ipoib_ah *)vah;
        } else {
                ah->ah = vah;
                ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
        }

        return ah;
}
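
/*
 * Note: on ib_create_ah() failure the ipoib_ah wrapper is freed and the
 * ERR_PTR is returned through the cast above, so callers must check the
 * result with IS_ERR() rather than for NULL.
 */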

void ipoib_free_ah(struct kref *kref)
{
        struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
        struct ipoib_dev_priv *priv = netdev_priv(ah->dev);
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        list_add_tail(&ah->list, &priv->dead_ahs);
        spin_unlock_irqrestore(&priv->lock, flags);
}
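
/*
 * Freeing an AH only queues it on dead_ahs; __ipoib_reap_ah() destroys it
 * later, once tx_tail has passed ah->last_send, so no posted send work
 * request can still reference the address handle.
 */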

static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
                                  u64 mapping[IPOIB_UD_RX_SG])
{
        if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
                ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
                                    DMA_FROM_DEVICE);
                ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
                                  DMA_FROM_DEVICE);
        } else
                ib_dma_unmap_single(priv->ca, mapping[0],
                                    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
                                    DMA_FROM_DEVICE);
}

static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
                                   struct sk_buff *skb,
                                   unsigned int length)
{
        if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
                unsigned int size;

                /*
                 * Only two buffers are needed for max_payload = 4K;
                 * the first buffer holds IPOIB_UD_HEAD_SIZE bytes.
                 */
                skb->tail += IPOIB_UD_HEAD_SIZE;
                skb->len  += length;

                size = length - IPOIB_UD_HEAD_SIZE;

                skb_frag_size_set(frag, size);
                skb->data_len += size;
                skb->truesize += size;
        } else
                skb_put(skb, length);
}
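
/*
 * The two helpers above mirror the two RX buffer layouts: for MTUs that need
 * scatter/gather (4K IB MTU), the datagram is split between an
 * IPOIB_UD_HEAD_SIZE linear part and one PAGE_SIZE fragment; otherwise a
 * single linear buffer of IPOIB_UD_BUF_SIZE(max_ib_mtu) is used.
 */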

static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_recv_wr *bad_wr;
        int ret;

        priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
        priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
        priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

        ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
                ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
                dev_kfree_skb_any(priv->rx_ring[id].skb);
                priv->rx_ring[id].skb = NULL;
        }

        return ret;
}

static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        int buf_size;
        u64 *mapping;

        if (ipoib_ud_need_sg(priv->max_ib_mtu))
                buf_size = IPOIB_UD_HEAD_SIZE;
        else
                buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

        skb = dev_alloc_skb(buf_size + 4);
        if (unlikely(!skb))
                return NULL;

        /*
         * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
         * header.  So we need 4 more bytes to get to 48 and align the
         * IP header to a multiple of 16.
         */
        skb_reserve(skb, 4);

        mapping = priv->rx_ring[id].mapping;
        mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
                goto error;

        if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
                struct page *page = alloc_page(GFP_ATOMIC);
                if (!page)
                        goto partial_error;
                skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
                mapping[1] =
                        ib_dma_map_page(priv->ca, page,
                                        0, PAGE_SIZE, DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
                        goto partial_error;
        }

        priv->rx_ring[id].skb = skb;
        return skb;

partial_error:
        ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
error:
        dev_kfree_skb_any(skb);
        return NULL;
}
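
/*
 * The DMA addresses are kept in rx_ring[id].mapping so the completion handler
 * can copy them out and unmap the old buffer even after a replacement skb has
 * been allocated for the same ring slot.
 */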

static int ipoib_ib_post_receives(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (!ipoib_alloc_rx_skb(dev, i)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        return -ENOMEM;
                }
                if (ipoib_ib_post_receive(dev, i)) {
                        ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
                        return -EIO;
                }
        }

        return 0;
}

static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
        struct sk_buff *skb;
        u64 mapping[IPOIB_UD_RX_SG];
        union ib_gid *dgid;

        ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_recvq_size);
                return;
        }

        skb = priv->rx_ring[wr_id].skb;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        ipoib_warn(priv, "failed recv event "
                                   "(status=%d, wrid=%d vend_err %x)\n",
                                   wc->status, wr_id, wc->vendor_err);
                ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
                dev_kfree_skb_any(skb);
                priv->rx_ring[wr_id].skb = NULL;
                return;
        }

        /*
         * Drop packets that this interface sent, ie multicast packets
         * that the HCA has replicated.
         */
        if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
                goto repost;

        memcpy(mapping, priv->rx_ring[wr_id].mapping,
               IPOIB_UD_RX_SG * sizeof *mapping);

        /*
         * If we can't allocate a new RX buffer, dump
         * this packet and reuse the old buffer.
         */
        if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
                ++dev->stats.rx_dropped;
                goto repost;
        }

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);

        ipoib_ud_dma_unmap_rx(priv, mapping);
        ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);

        /* First byte of dgid signals multicast when 0xff */
        dgid = &((struct ib_grh *)skb->data)->dgid;

        if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
                skb->pkt_type = PACKET_HOST;
        else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
                skb->pkt_type = PACKET_BROADCAST;
        else
                skb->pkt_type = PACKET_MULTICAST;

        skb_pull(skb, IB_GRH_BYTES);

        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb_reset_mac_header(skb);
        skb_pull(skb, IPOIB_ENCAP_LEN);

        ++dev->stats.rx_packets;
        dev->stats.rx_bytes += skb->len;

        if ((dev->features & NETIF_F_RXCSUM) &&
            likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        napi_gro_receive(&priv->napi, skb);

repost:
        if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
                ipoib_warn(priv, "ipoib_ib_post_receive failed "
                           "for buf %d\n", wr_id);
}
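
/*
 * Every completion ends with a repost of the same ring slot: either the
 * freshly allocated replacement buffer or, if allocation failed, the old
 * buffer, so the receive ring never shrinks under memory pressure.
 */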

static int ipoib_dma_map_tx(struct ib_device *ca,
                            struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i;
        int off;

        if (skb_headlen(skb)) {
                mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
                                               DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
                        return -EIO;

                off = 1;
        } else
                off = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                mapping[i + off] = ib_dma_map_page(ca,
                                                   skb_frag_page(frag),
                                                   frag->page_offset, skb_frag_size(frag),
                                                   DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
                        goto partial_error;
        }
        return 0;

partial_error:
        for (; i > 0; --i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

                ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
        }

        if (off)
                ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

        return -EIO;
}
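
/*
 * Mapping layout used by the TX path: mapping[0] holds the linear head when
 * skb_headlen() is non-zero (off == 1), and fragment i lands in
 * mapping[i + off].  The unwind loop indexes with [i - !off] because it only
 * walks the fragments that were already mapped.
 */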

static void ipoib_dma_unmap_tx(struct ib_device *ca,
                               struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i;
        int off;

        if (skb_headlen(skb)) {
                ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
                off = 1;
        } else
                off = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag),
                                  DMA_TO_DEVICE);
        }
}

static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id;
        struct ipoib_tx_buf *tx_req;

        ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_sendq_size);
                return;
        }

        tx_req = &priv->tx_ring[wr_id];

        ipoib_dma_unmap_tx(priv->ca, tx_req);

        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += tx_req->skb->len;

        dev_kfree_skb_any(tx_req->skb);

        ++priv->tx_tail;
        if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
            netif_queue_stopped(dev) &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                netif_wake_queue(dev);

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR)
                ipoib_warn(priv, "failed send event "
                           "(status=%d, wrid=%d vend_err %x)\n",
                           wc->status, wr_id, wc->vendor_err);
}

static int poll_tx(struct ipoib_dev_priv *priv)
{
        int n, i;

        n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
        for (i = 0; i < n; ++i)
                ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

        return n == MAX_SEND_CQE;
}
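
/*
 * poll_tx() returns non-zero when a full batch of MAX_SEND_CQE entries was
 * reaped, i.e. the send CQ may still hold more completions, which is why
 * callers loop with "while (poll_tx(priv))".
 */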

int ipoib_poll(struct napi_struct *napi, int budget)
{
        struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
        struct net_device *dev = priv->dev;
        int done;
        int t;
        int n, i;

        done = 0;

poll_more:
        while (done < budget) {
                int max = (budget - done);

                t = min(IPOIB_NUM_WC, max);
                n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

                for (i = 0; i < n; i++) {
                        struct ib_wc *wc = priv->ibwc + i;

                        if (wc->wr_id & IPOIB_OP_RECV) {
                                ++done;
                                if (wc->wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, wc);
                                else
                                        ipoib_ib_handle_rx_wc(dev, wc);
                        } else
                                ipoib_cm_handle_tx_wc(priv->dev, wc);
                }

                if (n != t)
                        break;
        }

        if (done < budget) {
                napi_complete(napi);
                if (unlikely(ib_req_notify_cq(priv->recv_cq,
                                              IB_CQ_NEXT_COMP |
                                              IB_CQ_REPORT_MISSED_EVENTS)) &&
                    napi_reschedule(napi))
                        goto poll_more;
        }

        return done;
}
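
/*
 * The rearm at the end of ipoib_poll() uses IB_CQ_REPORT_MISSED_EVENTS so a
 * completion that slipped in between the final poll and the request for
 * notification reschedules NAPI instead of being lost.
 */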

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        napi_schedule(&priv->napi);
}

static void drain_tx_cq(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        netif_tx_lock(dev);
        while (poll_tx(priv))
                ; /* nothing */

        if (netif_queue_stopped(dev))
                mod_timer(&priv->poll_timer, jiffies + 1);

        netif_tx_unlock(dev);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

        mod_timer(&priv->poll_timer, jiffies);
}

static inline int post_send(struct ipoib_dev_priv *priv,
                            unsigned int wr_id,
                            struct ib_ah *address, u32 qpn,
                            struct ipoib_tx_buf *tx_req,
                            void *head, int hlen)
{
        struct ib_send_wr *bad_wr;
        int i, off;
        struct sk_buff *skb = tx_req->skb;
        skb_frag_t *frags = skb_shinfo(skb)->frags;
        int nr_frags = skb_shinfo(skb)->nr_frags;
        u64 *mapping = tx_req->mapping;

        if (skb_headlen(skb)) {
                priv->tx_sge[0].addr   = mapping[0];
                priv->tx_sge[0].length = skb_headlen(skb);
                off = 1;
        } else
                off = 0;

        for (i = 0; i < nr_frags; ++i) {
                priv->tx_sge[i + off].addr   = mapping[i + off];
                priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
        }
        priv->tx_wr.num_sge          = nr_frags + off;
        priv->tx_wr.wr_id            = wr_id;
        priv->tx_wr.wr.ud.remote_qpn = qpn;
        priv->tx_wr.wr.ud.ah         = address;

        if (head) {
                priv->tx_wr.wr.ud.mss    = skb_shinfo(skb)->gso_size;
                priv->tx_wr.wr.ud.header = head;
                priv->tx_wr.wr.ud.hlen   = hlen;
                priv->tx_wr.opcode       = IB_WR_LSO;
        } else
                priv->tx_wr.opcode       = IB_WR_SEND;

        return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}
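
/*
 * A non-NULL head selects IB_WR_LSO: the TCP/IP headers that were pulled out
 * of the linear data are passed out of band in wr.ud.header/hlen and the
 * payload is segmented by the HCA using gso_size as the MSS.  Otherwise a
 * plain IB_WR_SEND is posted.
 */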

void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_tx_buf *tx_req;
        int hlen, rc;
        void *phead;

        if (skb_is_gso(skb)) {
                hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
                phead = skb->data;
                if (unlikely(!skb_pull(skb, hlen))) {
                        ipoib_warn(priv, "linear data too small\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return;
                }
        } else {
                if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
                        ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                                   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
                        return;
                }
                phead = NULL;
                hlen  = 0;
        }

        ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
                       skb->len, address, qpn);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
        if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
        else
                priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

        if (++priv->tx_outstanding == ipoib_sendq_size) {
                ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
                        ipoib_warn(priv, "request notify on send CQ failed\n");
                netif_stop_queue(dev);
        }

        rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                       address->ah, qpn, tx_req, phead, hlen);
        if (unlikely(rc)) {
                ipoib_warn(priv, "post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
                --priv->tx_outstanding;
                ipoib_dma_unmap_tx(priv->ca, tx_req);
                dev_kfree_skb_any(skb);
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } else {
                dev->trans_start = jiffies;

                address->last_send = priv->tx_head;
                ++priv->tx_head;
        }

        if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
                while (poll_tx(priv))
                        ; /* nothing */
}

static void __ipoib_reap_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah, *tah;
        LIST_HEAD(remove_list);
        unsigned long flags;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
                if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
                        list_del(&ah->list);
                        ib_destroy_ah(ah->ah);
                        kfree(ah);
                }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}
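
/*
 * The signed (int) subtraction above keeps the tx_tail vs. last_send
 * comparison correct when the counters wrap around.
 */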

void ipoib_reap_ah(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
        struct net_device *dev = priv->dev;

        __ipoib_reap_ah(dev);

        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
                queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                                   round_jiffies_relative(HZ));
}

static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
        drain_tx_cq((struct net_device *) ctx);
}

int ipoib_ib_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) {
                ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey);
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
                return -1;
        }
        set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

        ret = ipoib_init_qp(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
                return -1;
        }

        ret = ipoib_ib_post_receives(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
                ipoib_ib_dev_stop(dev, 1);
                return -1;
        }

        ret = ipoib_cm_dev_open(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
                ipoib_ib_dev_stop(dev, 1);
                return -1;
        }

        clear_bit(IPOIB_STOP_REAPER, &priv->flags);
        queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                           round_jiffies_relative(HZ));

        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_enable(&priv->napi);

        return 0;
}

static void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        u16 pkey_index;

        if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
        else
                set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

int ipoib_ib_dev_up(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_pkey_dev_check_presence(dev);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_dbg(priv, "PKEY is not assigned.\n");
                return 0;
        }

        set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

        return ipoib_mcast_start_thread(dev);
}

int ipoib_ib_dev_down(struct net_device *dev, int flush)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "downing ib_dev\n");

        clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        netif_carrier_off(dev);

        /* Shutdown the P_Key thread if still active */
        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                mutex_lock(&pkey_mutex);
                set_bit(IPOIB_PKEY_STOP, &priv->flags);
                cancel_delayed_work(&priv->pkey_poll_task);
                mutex_unlock(&pkey_mutex);
                if (flush)
                        flush_workqueue(ipoib_workqueue);
        }

        ipoib_mcast_stop_thread(dev, flush);
        ipoib_mcast_dev_flush(dev);

        ipoib_flush_paths(dev);

        return 0;
}

static int recvs_pending(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int pending = 0;
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (priv->rx_ring[i].skb)
                        ++pending;

        return pending;
}

void ipoib_drain_cq(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i, n;

        /*
         * We call completion handling routines that expect to be
         * called from the BH-disabled NAPI poll context, so disable
         * BHs here too.
         */
        local_bh_disable();

        do {
                n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
                for (i = 0; i < n; ++i) {
                        /*
                         * Convert any successful completions to flush
                         * errors to avoid passing packets up the
                         * stack after bringing the device down.
                         */
                        if (priv->ibwc[i].status == IB_WC_SUCCESS)
                                priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

                        if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
                                if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
                                else
                                        ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
                        } else
                                ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
                }
        } while (n == IPOIB_NUM_WC);

        while (poll_tx(priv))
                ; /* nothing */

        local_bh_enable();
}

int ipoib_ib_dev_stop(struct net_device *dev, int flush)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        unsigned long begin;
        struct ipoib_tx_buf *tx_req;
        int i;

        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_disable(&priv->napi);

        ipoib_cm_dev_stop(dev);

        /*
         * Move our QP to the error state and then reinitialize in
         * when all work requests have completed or have been flushed.
         */
        qp_attr.qp_state = IB_QPS_ERR;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

        /* Wait for all sends and receives to complete */
        begin = jiffies;

        while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
                                   priv->tx_head - priv->tx_tail, recvs_pending(dev));

                        /*
                         * assume the HW is wedged and just free up
                         * all our pending work requests.
                         */
                        while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                                tx_req = &priv->tx_ring[priv->tx_tail &
                                                        (ipoib_sendq_size - 1)];
                                ipoib_dma_unmap_tx(priv->ca, tx_req);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
                                --priv->tx_outstanding;
                        }

                        for (i = 0; i < ipoib_recvq_size; ++i) {
                                struct ipoib_rx_buf *rx_req;

                                rx_req = &priv->rx_ring[i];
                                if (!rx_req->skb)
                                        continue;
                                ipoib_ud_dma_unmap_rx(priv,
                                                      priv->rx_ring[i].mapping);
                                dev_kfree_skb_any(rx_req->skb);
                                rx_req->skb = NULL;
                        }

                        goto timeout;
                }

                ipoib_drain_cq(dev);

                msleep(1);
        }

        ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
        del_timer_sync(&priv->poll_timer);
        qp_attr.qp_state = IB_QPS_RESET;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to RESET state\n");

        /* Wait for all AHs to be reaped */
        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        cancel_delayed_work(&priv->ah_reap_task);
        if (flush)
                flush_workqueue(ipoib_workqueue);

        begin = jiffies;

        while (!list_empty(&priv->dead_ahs)) {
                __ipoib_reap_ah(dev);

                if (time_after(jiffies, begin + HZ)) {
                        ipoib_warn(priv, "timing out; will leak address handles\n");
                        break;
                }

                msleep(1);
        }

        ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

        return 0;
}
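
/*
 * Shutdown gives pending work requests up to five seconds to flush out of the
 * error-state QP; after that the hardware is assumed wedged and the remaining
 * TX/RX buffers are unmapped and freed by hand before the QP is reset.
 */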

int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        priv->ca = ca;
        priv->port = port;

        if (ipoib_transport_dev_init(dev, ca)) {
                printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
                return -ENODEV;
        }

        setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
                    (unsigned long) dev);

        if (dev->flags & IFF_UP) {
                if (ipoib_ib_dev_open(dev)) {
                        ipoib_transport_dev_cleanup(dev);
                        return -ENODEV;
                }
        }

        return 0;
}

static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
                                 enum ipoib_flush_level level)
{
        struct ipoib_dev_priv *cpriv;
        struct net_device *dev = priv->dev;
        u16 new_index;

        mutex_lock(&priv->vlan_mutex);

        /*
         * Flush any child interfaces too -- they might be up even if
         * the parent is down.
         */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
                __ipoib_ib_dev_flush(cpriv, level);

        mutex_unlock(&priv->vlan_mutex);

        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
                return;
        }

        if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
                return;
        }

        if (level == IPOIB_FLUSH_HEAVY) {
                if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
                        clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
                        ipoib_ib_dev_down(dev, 0);
                        ipoib_ib_dev_stop(dev, 0);
                        if (ipoib_pkey_dev_delay_open(dev))
                                return;
                }

                /* restart QP only if P_Key index is changed */
                if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
                    new_index == priv->pkey_index) {
                        ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
                        return;
                }
                priv->pkey_index = new_index;
        }

        if (level == IPOIB_FLUSH_LIGHT) {
                ipoib_mark_paths_invalid(dev);
                ipoib_mcast_dev_flush(dev);
        }

        if (level >= IPOIB_FLUSH_NORMAL)
                ipoib_ib_dev_down(dev, 0);

        if (level == IPOIB_FLUSH_HEAVY) {
                ipoib_ib_dev_stop(dev, 0);
                ipoib_ib_dev_open(dev);
        }

        /*
         * The device could have been brought down between the start and when
         * we get here, don't bring it back up if it's not configured up
         */
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                if (level >= IPOIB_FLUSH_NORMAL)
                        ipoib_ib_dev_up(dev);
                ipoib_mcast_restart_task(&priv->restart_task);
        }
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_light);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_normal);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_heavy);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
}
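
/*
 * The three work handlers above map onto the flush levels handled in
 * __ipoib_ib_dev_flush(): LIGHT invalidates paths and flushes multicast,
 * NORMAL additionally takes the interface down and back up, and HEAVY also
 * restarts the QP after re-checking the P_Key index.
 */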

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "cleaning up ib_dev\n");

        ipoib_mcast_stop_thread(dev, 1);
        ipoib_mcast_dev_flush(dev);

        ipoib_transport_dev_cleanup(dev);
}

/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of a delayed P_Key assignment
 * mechanism. It uses the same approach implemented for the multicast
 * group join. The single goal of this implementation is to quickly address
 * Bug #2507. This implementation will probably be removed when the P_Key
 * change async notification is available.
 */

void ipoib_pkey_poll(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, pkey_poll_task.work);
        struct net_device *dev = priv->dev;

        ipoib_pkey_dev_check_presence(dev);

        if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
                ipoib_open(dev);
        else {
                mutex_lock(&pkey_mutex);
                if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
                        queue_delayed_work(ipoib_workqueue,
                                           &priv->pkey_poll_task,
                                           HZ);
                mutex_unlock(&pkey_mutex);
        }
}

int ipoib_pkey_dev_delay_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* Look for the interface pkey value in the IB Port P_Key table and */
        /* set the interface pkey assignment flag */
        ipoib_pkey_dev_check_presence(dev);

        /* P_Key value not assigned yet - start polling */
        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                mutex_lock(&pkey_mutex);
                clear_bit(IPOIB_PKEY_STOP, &priv->flags);
                queue_delayed_work(ipoib_workqueue,
                                   &priv->pkey_poll_task,
                                   HZ);
                mutex_unlock(&pkey_mutex);