/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"
static DEFINE_MUTEX(bnad_fwimg_mutex);
/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");
/*
 * Global variables
 */
u32 bnad_rxqs_per_cq = 2;
static u32 bna_id;
static struct mutex bnad_list_mutex;
static LIST_HEAD(bnad_list);
static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH	(bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH	(bnad->rxq_depth)
#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
	sizeof(struct bnad_unmap_q) +				\
	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
} while (0)
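/*
 * Sizing note (illustrative, assuming the bnad.h layout of the era): the
 * "(_depth) - 1" term above makes sense if struct bnad_unmap_q ends with a
 * one-element trailing array, roughly:
 *
 *	struct bnad_unmap_q {
 *		u32			producer_index;
 *		u32			consumer_index;
 *		u32			q_depth;
 *		struct bnad_skb_unmap	unmap_array[1];
 *	};
 *
 * sizeof(struct bnad_unmap_q) then already covers one entry, so only
 * (_depth - 1) extra entries need to be added to the allocation length.
 */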
static void
bnad_add_to_list(struct bnad *bnad)
{
	mutex_lock(&bnad_list_mutex);
	list_add_tail(&bnad->list_entry, &bnad_list);
	bnad->id = bna_id++;
	mutex_unlock(&bnad_list_mutex);
}
static void
bnad_remove_from_list(struct bnad *bnad)
{
	mutex_lock(&bnad_list_mutex);
	list_del(&bnad->list_entry);
	mutex_unlock(&bnad_list_mutex);
}
/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
			    wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
						next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}
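/*
 * Note on the traversal above: the CQ is page-chained, so a plain
 * "cmpl + 1" is only valid while wi_range (the number of contiguous
 * entries left on the current page) is nonzero; once it hits zero,
 * BNA_CQ_QPGE_PTR_GET() must be re-evaluated to jump to the next page.
 */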
static u32
bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
		   u32 index, u32 depth, struct sk_buff *skb, u32 frag)
{
	int j;
	array[index].skb = NULL;

	dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr),
			skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&array[index], dma_addr, 0);
	BNA_QE_INDX_ADD(index, 1, depth);

	for (j = 0; j < frag; j++) {
		dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
			  skb_frag_size(&skb_shinfo(skb)->frags[j]),
			  DMA_TO_DEVICE);
		dma_unmap_addr_set(&array[index], dma_addr, 0);
		BNA_QE_INDX_ADD(index, 1, depth);
	}

	return index;
}
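/*
 * Note: bnad_pci_unmap_skb() returns the unmap-queue index advanced past
 * the head fragment and all page fragments, so callers can assign the
 * return value straight back to their running consumer index.
 */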
/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff		*skb = NULL;
	int			q;

	unmap_array = unmap_q->unmap_array;

	for (q = 0; q < unmap_q->q_depth; q++) {
		skb = unmap_array[q].skb;
		if (!skb)
			continue;

		unmap_cons = q;
		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
				unmap_cons, unmap_q->q_depth, skb,
				skb_shinfo(skb)->nr_frags);

		dev_kfree_skb_any(skb);
	}
}
/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 *		    c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad,
		 struct bna_tcb *tcb)
{
	u32		unmap_cons, sent_packets = 0, sent_bytes = 0;
	u16		wis, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff		*skb;

	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() runs out of a tasklet scheduled
	 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
	 * but this routine runs actually after the cleanup has been
	 * executed.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				  updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
				unmap_cons, unmap_q->q_depth, skb,
				skb_shinfo(skb)->nr_frags);

		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}
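/*
 * Note: bnad_free_txbufs() runs without a Tx lock. This is safe because the
 * hardware only advances *tcb->hw_consumer_index and the driver only moves
 * tcb->consumer_index toward it, while the BNAD_TXQ_FREE_SENT bit guarantees
 * a single reclaimer at a time. Illustrative index math, assuming the
 * power-of-two mask form of BNA_Q_INDEX_CHANGE(): with q_depth = 8,
 * consumer_index = 6 and hw_consumer_index = 2, (2 - 6) & 7 = 4 work items
 * are ready to be reclaimed.
 */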
/* Tx Free Tasklet function */
/* Frees for all the tcb's in all the Tx's */
/*
 * Scheduled from sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32		acked = 0;
	int			i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						  &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
					BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}
static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
				    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}
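/*
 * Note: smp_mb__before_clear_bit() orders the queue-state updates made by
 * bnad_free_txbufs() before BNAD_TXQ_FREE_SENT is cleared, so another CPU
 * that subsequently wins the test_and_set_bit() race is guaranteed to see
 * the updated consumer indices.
 */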
/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}
static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}
static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}
static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc =
		BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range)
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		skb = netdev_alloc_skb_ip_align(bnad->netdev,
						rcb->rxq->buffer_size);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
					  DMA_FROM_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}
static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}
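/*
 * Threshold note (illustrative): the refill is skipped unless the free
 * count shifted right by BNAD_RXQ_REFILL_THRESHOLD_SHIFT is nonzero, i.e.
 * at least 2^shift buffers are free. Assuming a shift of 3, a refill is
 * attempted only once 8 or more slots are empty, which batches allocations
 * instead of reposting one buffer per completion.
 */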
static int
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	u32 flags, unmap_cons;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);

	set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
		clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
		return 0;
	}

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
			    wi_range);
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		packets++;
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;
		unmap_array = unmap_q->unmap_array;
		unmap_cons = unmap_q->consumer_index;

		skb = unmap_array[unmap_cons].skb;
		BUG_ON(!(skb));
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
						next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));
		}

		flags = ntohl(cmpl->flags);
		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;
			goto next;
		}

		skb_put(skb, ntohs(cmpl->length));
		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		      (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		      (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		      (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);

		if (flags & BNA_CQ_EF_VLAN)
			__vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));

		if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			napi_gro_receive(&rx_ctrl->napi, skb);
		else
			netif_receive_skb(skb);

next:
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_refill_rxq(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_refill_rxq(bnad, ccb->rcb[1]);

	clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);

	return packets;
}
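/*
 * Note: bnad_poll_cq() honours the NAPI contract: it never processes more
 * than 'budget' completions and returns the number actually processed, so
 * bnad_napi_poll_rx() can compare the return value against the budget to
 * decide whether to call napi_complete() and re-enable interrupts.
 */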
static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}
/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}
/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_HANDLED;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}
/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held b'cos of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}
static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}
/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}
static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}
void
bnad_cb_ethport_link_status(struct bnad *bnad,
			enum bna_link_status link_status)
{
	bool link_up = false;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;
			printk(KERN_WARNING "bna: %s link up\n",
				bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				      tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule */
						printk(KERN_INFO "bna: %s %d "
						      "TXQ_STARTED\n",
						       bnad->netdev->name,
						       txq_id);
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_WARNING "bna: %s link down\n",
				bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}
static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}
static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}
static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tx_info->tcb[tcb->id] = NULL;
	tcb->priv = NULL;
}
static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}
static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}
static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}
static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;
		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		netif_stop_subqueue(bnad->netdev, txq_id);
		printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
			bnad->netdev->name, txq_id);
	}
}
static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		BUG_ON(*(tcb->hw_consumer_index) != 0);

		if (netif_carrier_ok(bnad->netdev)) {
			printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
				bnad->netdev->name, txq_id);
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround for first ioceth enable failure & we
	 * get a 0 MAC address. We try to get the MAC address
	 * later.
	 */
	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
		bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}
/*
 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
 */
static void
bnad_tx_cleanup(struct delayed_work *work)
{
	struct bnad_tx_info *tx_info =
		container_of(work, struct bnad_tx_info, tx_cleanup_work);
	struct bnad *bnad = NULL;
	struct bnad_unmap_q *unmap_q;
	struct bna_tcb *tcb;
	unsigned long flags;
	uint32_t i, pending = 0;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;

		bnad = tcb->bnad;

		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			pending++;
			continue;
		}

		bnad_free_all_txbufs(bnad, tcb);

		unmap_q = tcb->unmap_q;
		unmap_q->producer_index = 0;
		unmap_q->consumer_index = 0;

		smp_mb__before_clear_bit();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
	}

	if (pending) {
		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
			msecs_to_jiffies(1));
		return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_cleanup_complete(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
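/*
 * Note: if a TxQ is busy (BNAD_TXQ_FREE_SENT already held by a concurrent
 * reclaimer), bnad_tx_cleanup() does not spin; it re-queues itself with a
 * 1 ms delay and only signals bna_tx_cleanup_complete() once every TxQ has
 * actually been drained.
 */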
static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
	}

	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
}
static void
bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
	}
}
/*
 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
 */
static void
bnad_rx_cleanup(void *work)
{
	struct bnad_rx_info *rx_info =
		container_of(work, struct bnad_rx_info, rx_cleanup_work);
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad *bnad = NULL;
	unsigned long flags;
	uint32_t i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];

		if (!rx_ctrl->ccb)
			continue;

		bnad = rx_ctrl->ccb->bnad;

		/*
		 * Wait till the poll handler has exited
		 * and nothing can be scheduled anymore
		 */
		napi_disable(&rx_ctrl->napi);

		bnad_cq_cmpl_init(bnad, rx_ctrl->ccb);
		bnad_free_all_rxbufs(bnad, rx_ctrl->ccb->rcb[0]);
		if (rx_ctrl->ccb->rcb[1])
			bnad_free_all_rxbufs(bnad, rx_ctrl->ccb->rcb[1]);
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_cleanup_complete(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

		if (ccb->rcb[1])
			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
	}

	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
}
static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
{
	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
	struct bna_ccb *ccb;
	struct bna_rcb *rcb;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bnad_unmap_q *unmap_q;
	int i;
	int j;

	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
		rx_ctrl = &rx_info->rx_ctrl[i];
		ccb = rx_ctrl->ccb;
		if (!ccb)
			continue;

		napi_enable(&rx_ctrl->napi);

		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
			rcb = ccb->rcb[j];
			if (!rcb)
				continue;

			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
			unmap_q = rcb->unmap_q;

			/* Now allocate & post buffers for this RCB */
			/* !!Allocation in callback context */
			if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
				if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
					>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
					bnad_alloc_n_post_rxbufs(bnad, rcb);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
			}
		}
	}
}
static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
{
	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mcast_comp);
}
void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		       struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

static void
bnad_cb_enet_mtu_set(struct bnad *bnad)
{
	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.mtu_comp);
}

void
bnad_cb_completion(void *arg, enum bfa_status status)
{
	struct bnad_iocmd_comp *iocmd_comp =
			(struct bnad_iocmd_comp *)arg;

	iocmd_comp->comp_status = (u32) status;
	complete(&iocmd_comp->comp);
}
/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}
static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
				GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						mem_info->len, &dma_pa,
						GFP_KERNEL);

			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
							GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}
/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad)
{
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);
}
/*
 * Allocates IRQ for Mailbox, but keep it disabled
 * This will be enabled once we get the mbox enable callback
 * from bna
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad)
{
	int		err = 0;
	unsigned long	irq_flags, flags;
	u32	irq;
	irq_handler_t	irq_handler;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
		irq_flags = 0;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
	}

	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs do not execute
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, irq_flags,
			  bnad->mbox_irq_name, bnad);

	return err;
}
static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}
/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
					(bnad->num_tx * bnad->num_txq_per_tx) +
					txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					sizeof(struct bna_intr_descr),
					GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}
/*
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}
/*
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
			u32 tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
				tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
	return -1;
}
/*
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}
/*
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
			u32 rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}
/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  u32 tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}
/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  u32 rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					&res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
					&res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}
/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m	CPU n
 *	0       1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_hw_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}

/*
 * Set up timer for statistics
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_start(struct bnad *bnad)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->stats_timer, bnad_stats_timeout,
			    (unsigned long)bnad);
		mod_timer(&bnad->stats_timer,
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Stops the stats timer
 * Called with mutex_lock(&bnad->conf_mutex) held
 */
static void
bnad_stats_timer_stop(struct bnad *bnad)
{
	int to_del = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		to_del = 1;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (to_del)
		del_timer_sync(&bnad->stats_timer);
}
/* Utilities */

static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
							ETH_ALEN);
		i++;
	}
}
static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bnad *bnad = rx_ctrl->bnad;
	int rcvd = 0;

	rx_ctrl->rx_poll_ctr++;

	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget);
	if (rcvd >= budget)
		return rcvd;

poll_exit:
	napi_complete(napi);

	rx_ctrl->rx_complete++;

	if (rx_ctrl->ccb)
		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);

	return rcvd;
}
#define BNAD_NAPI_POLL_QUOTA		64
static void
bnad_napi_add(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_ctrl *rx_ctrl;
	int i;

	/* Initialize & enable NAPI */
	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
	}
}

static void
bnad_napi_delete(struct bnad *bnad, u32 rx_id)
{
	int i;

	/* First disable and then clean up */
	for (i = 0; i < bnad->num_rxp_per_rx; i++)
		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
}
/* Should be held with conf_lock held */
static void
bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
			bnad->num_txq_per_tx);

	if (0 == tx_id)
		tasklet_kill(&bnad->tx_free_tasklet);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;
	tx_info->tx_id = 0;

	bnad_tx_res_free(bnad, res_info);
}
/* Should be held with conf_lock held */
static int
bnad_setup_tx(struct bnad *bnad, u32 tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	static const struct bna_tx_event_cbfn tx_cbfn = {
		.tcb_setup_cbfn = bnad_cb_tcb_setup,
		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
		.tx_stall_cbfn = bnad_cb_tx_stall,
		.tx_resume_cbfn = bnad_cb_tx_resume,
		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
	};
	struct bna_tx *tx;
	unsigned long flags;

	tx_info->tx_id = tx_id;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;
	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
			bnad->num_txq_per_tx,
			BNAD_TX_UNMAPQ_DEPTH);

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
	if (err)
		return err;

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!tx)
		goto err_return;
	tx_info->tx = tx;

	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
			(work_func_t)bnad_tx_cleanup);

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
			tx_id, bnad->num_txq_per_tx);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}
/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
	rx_config->rx_type = BNA_RX_T_REGULAR;
	rx_config->num_paths = bnad->num_rxp_per_rx;
	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;

	if (bnad->num_rxp_per_rx > 1) {
		rx_config->rss_status = BNA_STATUS_T_ENABLED;
		rx_config->rss_config.hash_type =
				(BFI_ENET_RSS_IPV6 |
				 BFI_ENET_RSS_IPV6_TCP |
				 BFI_ENET_RSS_IPV4 |
				 BFI_ENET_RSS_IPV4_TCP);
		rx_config->rss_config.hash_mask =
				bnad->num_rxp_per_rx - 1;
		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
			sizeof(rx_config->rss_config.toeplitz_hash_key));
	} else {
		rx_config->rss_status = BNA_STATUS_T_DISABLED;
		memset(&rx_config->rss_config, 0,
		       sizeof(rx_config->rss_config));
	}
	rx_config->rxp_type = BNA_RXP_SLR;
	rx_config->q_depth = bnad->rxq_depth;

	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;

	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}
static void
bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	int i;

	for (i = 0; i < bnad->num_rxp_per_rx; i++)
		rx_info->rx_ctrl[i].bnad = bnad;
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
static void
bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	unsigned long flags;
	int to_del = 0;

	if (!rx_info->rx)
		return;

	if (0 == rx_id) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
		    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
			to_del = 1;
		}
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		if (to_del)
			del_timer_sync(&bnad->dim_timer);
	}

	init_completion(&bnad->bnad_completions.rx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.rx_comp);

	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

	bnad_napi_delete(bnad, rx_id);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_destroy(rx_info->rx);

	rx_info->rx = NULL;
	rx_info->rx_id = 0;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_rx_res_free(bnad, res_info);
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
static int
bnad_setup_rx(struct bnad *bnad, u32 rx_id)
{
	int err;
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	static const struct bna_rx_event_cbfn rx_cbfn = {
		.rcb_setup_cbfn = bnad_cb_rcb_setup,
		.rcb_destroy_cbfn = NULL,
		.ccb_setup_cbfn = bnad_cb_ccb_setup,
		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
		.rx_stall_cbfn = bnad_cb_rx_stall,
		.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
		.rx_post_cbfn = bnad_cb_rx_post,
	};
	struct bna_rx *rx;
	unsigned long flags;

	rx_info->rx_id = rx_id;

	/* Initialize the Rx object configuration */
	bnad_init_rx_config(bnad, rx_config);

	/* Get BNA's resource requirement for one Rx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_res_req(rx_config, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
			rx_config->num_paths +
			((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
				rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);

	/* Allocate resource */
	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
	if (err)
		return err;

	bnad_rx_ctrl_init(bnad, rx_id);

	/* Ask BNA to create one Rx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
			rx_info);
	if (!rx) {
		err = -ENOMEM;
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		goto err_return;
	}
	rx_info->rx = rx;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	INIT_WORK(&rx_info->rx_cleanup_work,
			(work_func_t)(bnad_rx_cleanup));

	/*
	 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
	 * so that IRQ handler cannot schedule NAPI at this point.
	 */
	bnad_napi_add(bnad, rx_id);

	/* Register ISR for the Rx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
						rx_config->num_paths);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (0 == rx_id) {
		/* Set up Dynamic Interrupt Moderation Vector */
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);

		/* Enable VLAN filtering only on the default Rx */
		bna_rx_vlanfilter_enable(rx);

		/* Start the DIM timer */
		bnad_dim_timer_start(bnad);
	}

	bna_rx_enable(rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_cleanup_rx(bnad, rx_id);
	return err;
}
/* Called with conf_lock & bnad->bna_lock held */
void
bnad_tx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_tx_info *tx_info;

	tx_info = &bnad->tx_info[0];
	if (!tx_info->tx)
		return;

	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
}

/* Called with conf_lock & bnad->bna_lock held */
void
bnad_rx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info;
	int	i;

	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		bna_rx_coalescing_timeo_set(rx_info->rx,
				bnad->rx_coalescing_timeo);
	}
}
/*
 * Called with bnad->bna_lock held
 */
int
bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
{
	int ret;

	if (!is_valid_ether_addr(mac_addr))
		return -EADDRNOTAVAIL;

	/* If datapath is down, pretend everything went through */
	if (!bnad->rx_info[0].rx)
		return 0;

	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
	if (ret != BNA_CB_SUCCESS)
		return -EADDRNOTAVAIL;

	return 0;
}
/* Should be called with conf_lock held */
static int
bnad_enable_default_bcast(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
	int ret;
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mcast_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
				bnad_cb_rx_mcast_add);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (ret == BNA_CB_SUCCESS)
		wait_for_completion(&bnad->bnad_completions.mcast_comp);
	else
		return -ENODEV;

	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
		return -ENODEV;

	return 0;
}
/* Called with mutex_lock(&bnad->conf_mutex) held */
void
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
{
	u16 vid;
	unsigned long flags;

	for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}
}
/* Statistics utilities */
void
bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	int i, j;

	for (i = 0; i < bnad->num_rx; i++) {
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				stats->rx_packets += bnad->rx_info[i].
				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
				stats->rx_bytes += bnad->rx_info[i].
					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1]->rxq) {
					stats->rx_packets +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_packets;
					stats->rx_bytes +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_bytes;
				}
			}
		}
	}
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			if (bnad->tx_info[i].tcb[j]) {
				stats->tx_packets +=
				bnad->tx_info[i].tcb[j]->txq->tx_packets;
				stats->tx_bytes +=
					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
			}
		}
	}
}
/*
 * Must be called with the bna_lock held.
 */
void
bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	struct bfi_enet_stats_mac *mac_stats;
	u32 bmap;
	int i;

	mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
	stats->rx_errors =
		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
		mac_stats->rx_undersize;
	stats->tx_errors = mac_stats->tx_fcs_error +
					mac_stats->tx_undersize;
	stats->rx_dropped = mac_stats->rx_drop;
	stats->tx_dropped = mac_stats->tx_drop;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_total_collision;

	stats->rx_length_errors = mac_stats->rx_frame_length_error;

	/* receive ring buffer overflow  ?? */

	stats->rx_crc_errors = mac_stats->rx_fcs_error;
	stats->rx_frame_errors = mac_stats->rx_alignment_error;
	/* recv'r fifo overrun */
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats->rx_fifo_errors +=
				bnad->stats.bna_stats->
					hw_stats.rxf_stats[i].frame_drops;
			break;
		}
		bmap >>= 1;
	}
}
static void
bnad_mbox_irq_sync(struct bnad *bnad)
{
	u32 irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
	else
		irq = bnad->pcidev->irq;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	synchronize_irq(irq);
}
/* Utility used by bnad_start_xmit, for doing TSO */
static int
bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
	int err;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err) {
			BNAD_UPDATE_CTR(bnad, tso_err);
			return err;
		}
	}

	/*
	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
	 * excluding the length field.
	 */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		/* Do we really need these? */
		iph->tot_len = 0;
		iph->check = 0;

		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
					   IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso4);
	} else {
		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		ipv6h->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
					 IPPROTO_TCP, 0);
		BNAD_UPDATE_CTR(bnad, tso6);
	}

	return 0;
}
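/*
 * Checksum note: for TSO the stack expects the TCP checksum field to hold
 * the pseudo-header sum *without* the length, because the hardware fills in
 * the length per generated segment. That is why csum_tcpudp_magic() /
 * csum_ipv6_magic() are called with a length of 0 and the result is
 * bitwise-inverted before being stored in tcp_hdr(skb)->check.
 */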
/*
 * Initialize Q numbers depending on Rx Paths
 * Called with bnad->bna_lock held, because of cfg_flags
 * access.
 */
static void
bnad_q_num_init(struct bnad *bnad)
{
	int rxps;

	rxps = min((uint)num_online_cpus(),
			(uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));

	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
		rxps = 1;	/* INTx */

	bnad->num_rx = 1;
	bnad->num_tx = 1;
	bnad->num_rxp_per_rx = rxps;
	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
}
/*
 * Adjusts the Q numbers, given a number of msix vectors
 * Give preference to RSS as opposed to Tx priority Queues,
 * in such a case, just use 1 Tx Q
 * Called with bnad->bna_lock held b'cos of cfg_flags access
 */
static void
bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
{
	bnad->num_txq_per_tx = 1;
	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
		bnad->num_rxp_per_rx = msix_vectors -
			(bnad->num_tx * bnad->num_txq_per_tx) -
			BNAD_MAILBOX_MSIX_VECTORS;
	} else
		bnad->num_rxp_per_rx = 1;
}
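/*
 * Arithmetic note (illustrative): with 8 MSI-X vectors granted, num_tx = 1
 * and num_txq_per_tx forced to 1, the Rx side receives
 * 8 - 1 (Tx) - BNAD_MAILBOX_MSIX_VECTORS paths; assuming one mailbox
 * vector, that leaves num_rxp_per_rx = 6.
 */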
/* Enable / disable ioceth */
static int
bnad_ioceth_disable(struct bnad *bnad)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	init_completion(&bnad->bnad_completions.ioc_comp);
	bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));

	err = bnad->bnad_completions.ioc_comp_status;
	return err;
}

static int
bnad_ioceth_enable(struct bnad *bnad)
{
	int err = 0;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	init_completion(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
	bna_ioceth_enable(&bnad->bna.ioceth);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));

	err = bnad->bnad_completions.ioc_comp_status;

	return err;
}
/* Free BNA resources */
static void
bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
		u32 res_val_max)
{
	int i;

	for (i = 0; i < res_val_max; i++)
		bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
}

/* Allocates memory and interrupt resources for BNA */
static int
bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		u32 res_val_max)
{
	int i, err;

	for (i = 0; i < res_val_max; i++) {
		err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_res_free(bnad, res_info, res_val_max);
	return err;
}
/* Interrupt enable / disable */
static void
bnad_enable_msix(struct bnad *bnad)
{
	int i, ret;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (bnad->msix_table)
		return;

	bnad->msix_table =
		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);

	if (!bnad->msix_table)
		goto intx_mode;

	for (i = 0; i < bnad->msix_num; i++)
		bnad->msix_table[i].entry = i;

	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
	if (ret > 0) {
		/* Not enough MSI-X vectors. */
		pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
			ret, bnad->msix_num);

		spin_lock_irqsave(&bnad->bna_lock, flags);
		/* ret = #of vectors that we got */
		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
			(ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
			 BNAD_MAILBOX_MSIX_VECTORS;

		if (bnad->msix_num > ret)
			goto intx_mode;

		/* Try once more with adjusted numbers */
		/* If this fails, fall back to INTx */
		ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
				      bnad->msix_num);
		if (ret)
			goto intx_mode;

	} else if (ret < 0)
		goto intx_mode;

	pci_intx(bnad->pcidev, 0);

	return;

intx_mode:
	pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");

	kfree(bnad->msix_table);
	bnad->msix_table = NULL;
	bnad->msix_num = 0;
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad->cfg_flags &= ~BNAD_CF_MSIX;
	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
static void
bnad_disable_msix(struct bnad *bnad)
{
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		bnad->cfg_flags &= ~BNAD_CF_MSIX;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		pci_disable_msix(bnad->pcidev);
		kfree(bnad->msix_table);
		bnad->msix_table = NULL;
	}
}
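
/*
 * ndo_open brings the port up in a fixed order: Tx object, Rx object,
 * enet (MTU, pause config, enable), then broadcast, VLAN and unicast
 * address restore, and finally the stats timer.  ndo_stop below
 * unwinds the same state synchronously, waiting on enet_comp before
 * tearing down the queues.
 */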
/* Netdev entry points */
static int
bnad_open(struct net_device *netdev)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct bna_pause_config pause_config;
	int mtu;
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Tx */
	err = bnad_setup_tx(bnad, 0);
	if (err)
		goto err_return;

	/* Rx */
	err = bnad_setup_rx(bnad, 0);
	if (err)
		goto cleanup_tx;

	/* Port */
	pause_config.tx_pause = 0;
	pause_config.rx_pause = 0;

	mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
	bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
	bna_enet_enable(&bnad->bna.enet);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Enable broadcast */
	bnad_enable_default_bcast(bnad);

	/* Restore VLANs, if any */
	bnad_restore_vlans(bnad, 0);

	/* Set the UCAST address */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Start the stats timer */
	bnad_stats_timer_start(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;

cleanup_tx:
	bnad_cleanup_tx(bnad, 0);

err_return:
	mutex_unlock(&bnad->conf_mutex);
	return err;
}
static int
bnad_stop(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	mutex_lock(&bnad->conf_mutex);

	/* Stop the stats timer */
	bnad_stats_timer_stop(bnad);

	init_completion(&bnad->bnad_completions.enet_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
			bnad_cb_enet_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.enet_comp);

	bnad_cleanup_tx(bnad, 0);
	bnad_cleanup_rx(bnad, 0);

	/* Synchronize mailbox IRQ */
	bnad_mbox_irq_sync(bnad);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
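
/*
 * Tx descriptor arithmetic: an skb occupies 1 + nr_frags scatter
 * vectors and each work item (WI) holds up to four of them
 * (BFI_TX_MAX_VECTORS_PER_WI), so BNA_TXQ_WI_NEEDED() rounds up.
 * For example, a linear skb with 9 frags needs 10 vectors, i.e. 3
 * WIs, where the 2nd and 3rd are tagged BNA_TXQ_WI_EXTENSION in the
 * fragment loop below.
 */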
/*
 * bnad_start_xmit : Netdev entry point for Transmit
 *		     Called under lock held by net_device
 */
static netdev_tx_t
bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 txq_id = 0;
	struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id];

	u16 txq_prod, vlan_tag = 0;
	u32 unmap_prod, wis, wis_used, wi_range;
	u32 vectors, vect_id, i, acked;
	int err;
	unsigned int len;
	u32 gso_size;

	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	dma_addr_t dma_addr;
	struct bna_txq_entry *txqent;
	u16 flags;

	if (unlikely(skb->len <= ETH_HLEN)) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
		return NETDEV_TX_OK;
	}
	if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
		return NETDEV_TX_OK;
	}
	if (unlikely(skb_headlen(skb) == 0)) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
		return NETDEV_TX_OK;
	}

	/*
	 * Takes care of the Tx that is scheduled between clearing the flag
	 * and the netif_tx_stop_all_queues() call.
	 */
	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
		return NETDEV_TX_OK;
	}

	vectors = 1 + skb_shinfo(skb)->nr_frags;
	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
		return NETDEV_TX_OK;
	}
	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */

	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
		if ((u16) (*tcb->hw_consumer_index) !=
		    tcb->consumer_index &&
		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			acked = bnad_free_txbufs(bnad, tcb);
			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
				bna_ib_ack(tcb->i_dbell, acked);
			smp_mb__before_clear_bit();
			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
		} else {
			netif_stop_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
		}

		smp_mb();
		/*
		 * Check again to deal with race condition between
		 * netif_stop_queue here, and netif_wake_queue in
		 * interrupt handler which is not inside netif tx lock.
		 */
		if (likely
		    (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
		     vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			return NETDEV_TX_BUSY;
		} else {
			netif_wake_queue(netdev);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	unmap_prod = unmap_q->producer_index;
	flags = 0;

	txq_prod = tcb->producer_index;
	BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
	txqent->hdr.wi.reserved = 0;
	txqent->hdr.wi.num_vectors = vectors;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = (u16) vlan_tx_tag_get(skb);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}
	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
		vlan_tag =
			(tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff);
		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
	}

	txqent->hdr.wi.vlan_tag = htons(vlan_tag);

	if (skb_is_gso(skb)) {
		gso_size = skb_shinfo(skb)->gso_size;

		if (unlikely(gso_size > netdev->mtu)) {
			dev_kfree_skb(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
			return NETDEV_TX_OK;
		}
		if (unlikely((gso_size + skb_transport_offset(skb) +
			tcp_hdrlen(skb)) >= skb->len)) {
			txqent->hdr.wi.opcode =
				__constant_htons(BNA_TXQ_WI_SEND);
			txqent->hdr.wi.lso_mss = 0;
			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
		} else {
			txqent->hdr.wi.opcode =
				__constant_htons(BNA_TXQ_WI_SEND_LSO);
			txqent->hdr.wi.lso_mss = htons(gso_size);
		}

		err = bnad_tso_prepare(bnad, skb);
		if (unlikely(err)) {
			dev_kfree_skb(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
			return NETDEV_TX_OK;
		}
		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
		txqent->hdr.wi.l4_hdr_size_n_offset =
			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
			      (tcp_hdrlen(skb) >> 2,
			       skb_transport_offset(skb)));
	} else {
		txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
		txqent->hdr.wi.lso_mss = 0;

		if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) {
			dev_kfree_skb(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
			return NETDEV_TX_OK;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			u8 proto = 0;

			if (skb->protocol == __constant_htons(ETH_P_IP))
				proto = ip_hdr(skb)->protocol;
			else if (skb->protocol ==
				 __constant_htons(ETH_P_IPV6)) {
				/* nexthdr may not be TCP immediately. */
				proto = ipv6_hdr(skb)->nexthdr;
			}
			if (proto == IPPROTO_TCP) {
				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
				txqent->hdr.wi.l4_hdr_size_n_offset =
					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
					      (0, skb_transport_offset(skb)));

				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);

				if (unlikely(skb_headlen(skb) <
				skb_transport_offset(skb) + tcp_hdrlen(skb))) {
					dev_kfree_skb(skb);
					BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
					return NETDEV_TX_OK;
				}

			} else if (proto == IPPROTO_UDP) {
				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
				txqent->hdr.wi.l4_hdr_size_n_offset =
					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
					      (0, skb_transport_offset(skb)));

				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
				if (unlikely(skb_headlen(skb) <
					     skb_transport_offset(skb) +
					     sizeof(struct udphdr))) {
					dev_kfree_skb(skb);
					BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
					return NETDEV_TX_OK;
				}
			} else {
				dev_kfree_skb(skb);
				BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
				return NETDEV_TX_OK;
			}
		} else {
			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
		}
	}

	txqent->hdr.wi.flags = htons(flags);

	txqent->hdr.wi.frame_length = htonl(skb->len);

	unmap_q->unmap_array[unmap_prod].skb = skb;
	len = skb_headlen(skb);
	txqent->vector[0].length = htons(len);
	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
			   dma_addr);

	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
	BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

	vect_id = 0;
	wis_used = 1;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		u16 size = skb_frag_size(frag);

		if (unlikely(size == 0)) {
			unmap_prod = unmap_q->producer_index;

			unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
					   unmap_q->unmap_array,
					   unmap_prod, unmap_q->q_depth, skb,
					   i);
			dev_kfree_skb(skb);
			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
			return NETDEV_TX_OK;
		}

		len += size;

		if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
			vect_id = 0;
			if (--wi_range)
				txqent++;
			else {
				BNA_QE_INDX_ADD(txq_prod, wis_used,
						tcb->q_depth);
				wis_used = 0;
				BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
						     txqent, wi_range);
			}
			wis_used++;
			txqent->hdr.wi_ext.opcode =
				__constant_htons(BNA_TXQ_WI_EXTENSION);
		}

		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
		txqent->vector[vect_id].length = htons(size);
		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
					    0, size, DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
	}

	if (unlikely(len != skb->len)) {
		unmap_prod = unmap_q->producer_index;

		unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
				unmap_q->unmap_array, unmap_prod,
				unmap_q->q_depth, skb,
				skb_shinfo(skb)->nr_frags);
		dev_kfree_skb(skb);
		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
		return NETDEV_TX_OK;
	}

	unmap_q->producer_index = unmap_prod;
	BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth);
	tcb->producer_index = txq_prod;

	smp_mb();

	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		return NETDEV_TX_OK;

	bna_txq_prod_indx_doorbell(tcb);
	smp_mb();

	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
		tasklet_schedule(&bnad->tx_free_tasklet);

	return NETDEV_TX_OK;
}
/*
 * Used spin_lock to synchronize reading of stats structures, which
 * is written by BNA under the same lock.
 */
static struct rtnl_link_stats64 *
bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bnad_netdev_qstats_fill(bnad, stats);
	bnad_netdev_hwstats_fill(bnad, stats);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return stats;
}
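
/*
 * Rx mode programming uses a pair of masks: new_mask carries the
 * desired value of each mode bit while valid_mask tells
 * bna_rx_mode_set() which bits to act on, so promiscuous and
 * all-multicast state can be flipped independently in one call.
 */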
void
bnad_set_rx_mode(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	u32 new_mask, valid_mask;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	new_mask = valid_mask = 0;

	if (netdev->flags & IFF_PROMISC) {
		if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) {
			new_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags |= BNAD_CF_PROMISC;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_PROMISC) {
			new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT;
			valid_mask = BNAD_RXMODE_PROMISC_DEFAULT;
			bnad->cfg_flags &= ~BNAD_CF_PROMISC;
		}
	}

	if (netdev->flags & IFF_ALLMULTI) {
		if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) {
			new_mask |= BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags |= BNAD_CF_ALLMULTI;
		}
	} else {
		if (bnad->cfg_flags & BNAD_CF_ALLMULTI) {
			new_mask &= ~BNA_RXMODE_ALLMULTI;
			valid_mask |= BNA_RXMODE_ALLMULTI;
			bnad->cfg_flags &= ~BNA_CF_ALLMULTI;
		}
	}

	if (bnad->rx_info[0].rx == NULL)
		goto unlock;

	bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL);

	if (!netdev_mc_empty(netdev)) {
		u8 *mcaddr_list;
		int mc_count = netdev_mc_count(netdev);

		/* Index 0 holds the broadcast address */
		mcaddr_list =
			kzalloc((mc_count + 1) * ETH_ALEN,
				GFP_ATOMIC);
		if (!mcaddr_list)
			goto unlock;

		memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN);

		/* Copy rest of the MC addresses */
		bnad_netdev_mc_list_get(netdev, mcaddr_list);

		bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
					mcaddr_list, NULL);

		/* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */
		kfree(mcaddr_list);
	}
unlock:
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
/*
 * bna_lock is used to sync writes to netdev->addr
 * conf_lock cannot be used since this call may be made
 * in a non-blocking context.
 */
static int
bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
{
	int err;
	struct bnad *bnad = netdev_priv(netdev);
	struct sockaddr *sa = (struct sockaddr *)mac_addr;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);

	if (!err)
		memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return err;
}
static int
bnad_mtu_set(struct bnad *bnad, int mtu)
{
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mtu_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.mtu_comp);

	return bnad->bnad_completions.mtu_comp_status;
}
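
/*
 * The enet MTU programmed below is the full on-wire frame size, not
 * the L3 payload MTU: for the default 1500-byte netdev MTU this is
 * ETH_HLEN (14) + VLAN_HLEN (4) + 1500 + ETH_FCS_LEN (4) = 1522 bytes.
 */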
static int
bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
	int err, mtu = netdev->mtu;
	struct bnad *bnad = netdev_priv(netdev);

	if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
		return -EINVAL;

	mutex_lock(&bnad->conf_mutex);

	netdev->mtu = new_mtu;

	mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
	err = bnad_mtu_set(bnad, mtu);
	if (err)
		err = -EBUSY;

	mutex_unlock(&bnad->conf_mutex);
	return err;
}
static int
bnad_vlan_rx_add_vid(struct net_device *netdev,
		     unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return 0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
	set_bit(vid, bnad->active_vlans);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
static int
bnad_vlan_rx_kill_vid(struct net_device *netdev,
		      unsigned short vid)
{
	struct bnad *bnad = netdev_priv(netdev);
	unsigned long flags;

	if (!bnad->rx_info[0].rx)
		return 0;

	mutex_lock(&bnad->conf_mutex);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	clear_bit(vid, bnad->active_vlans);
	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	return 0;
}
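
/*
 * Netpoll runs without a usable IRQ context: in INTx mode the ISR is
 * called directly with the device interrupt masked around it, while
 * in MSI-X mode only the Rx paths are polled, since Tx completions
 * can already be reaped from the sending context.
 */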
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
{
	struct bnad *bnad = netdev_priv(netdev);
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	u32 curr_mask;
	int i, j;

	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
		bna_intx_disable(&bnad->bna, curr_mask);
		bnad_isr(bnad->pcidev->irq, netdev);
		bna_intx_enable(&bnad->bna, curr_mask);
	} else {
		/*
		 * Tx processing may happen in sending context, so no need
		 * to explicitly process completions here
		 */

		/* Rx processing */
		for (i = 0; i < bnad->num_rx; i++) {
			rx_info = &bnad->rx_info[i];
			if (!rx_info->rx)
				continue;
			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
				rx_ctrl = &rx_info->rx_ctrl[j];
				if (rx_ctrl->ccb)
					bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
			}
		}
	}
}
#endif
static const struct net_device_ops bnad_netdev_ops = {
	.ndo_open		= bnad_open,
	.ndo_stop		= bnad_stop,
	.ndo_start_xmit		= bnad_start_xmit,
	.ndo_get_stats64	= bnad_get_stats64,
	.ndo_set_rx_mode	= bnad_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnad_set_mac_address,
	.ndo_change_mtu		= bnad_change_mtu,
	.ndo_vlan_rx_add_vid	= bnad_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bnad_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnad_netpoll
#endif
};
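
/*
 * using_dac mirrors the DMA mask chosen in bnad_pci_init(): only when
 * the device got a 64-bit mask is NETIF_F_HIGHDMA advertised, letting
 * the stack hand the driver skbs with highmem fragments.
 */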
static void
bnad_netdev_init(struct bnad *bnad, bool using_dac)
{
	struct net_device *netdev = bnad->netdev;

	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->mem_start = bnad->mmio_start;
	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;

	netdev->netdev_ops = &bnad_netdev_ops;
	bnad_set_ethtool_ops(netdev);
}
/*
 * 1. Initialize the bnad structure
 * 2. Setup netdev pointer in pci_dev
 * 3. Initialize Tx free tasklet
 * 4. Initialize no. of TxQ & CQs & MSIX vectors
 * 5. Initialize work queue.
 */
static int
bnad_init(struct bnad *bnad,
	  struct pci_dev *pdev, struct net_device *netdev)
{
	unsigned long flags;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	bnad->netdev = netdev;
	bnad->pcidev = pdev;
	bnad->mmio_start = pci_resource_start(pdev, 0);
	bnad->mmio_len = pci_resource_len(pdev, 0);
	bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
	if (!bnad->bar0) {
		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
		pci_set_drvdata(pdev, NULL);
		return -ENOMEM;
	}
	pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
	       (unsigned long long) bnad->mmio_len);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (!bnad_msix_disable)
		bnad->cfg_flags = BNAD_CF_MSIX;

	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;

	bnad_q_num_init(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
		(bnad->num_rx * bnad->num_rxp_per_rx) +
			 BNAD_MAILBOX_MSIX_VECTORS;

	bnad->txq_depth = BNAD_TXQ_DEPTH;
	bnad->rxq_depth = BNAD_RXQ_DEPTH;

	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;

	tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet,
		     (unsigned long)bnad);

	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
	if (!bnad->work_q) {
		iounmap(bnad->bar0);
		pci_set_drvdata(bnad->pcidev, NULL);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Must be called after bnad_pci_uninit()
 * so that iounmap() and pci_set_drvdata(NULL)
 * happens only after PCI uninitialization.
 */
static void
bnad_uninit(struct bnad *bnad)
{
	if (bnad->work_q) {
		flush_workqueue(bnad->work_q);
		destroy_workqueue(bnad->work_q);
		bnad->work_q = NULL;
	}

	if (bnad->bar0)
		iounmap(bnad->bar0);
	pci_set_drvdata(bnad->pcidev, NULL);
}
/*
 * Initialize locks
 *	a) Per ioceth mutex used for serializing configuration
 *	   changes from OS interface
 *	b) spin lock used to protect bna state machine
 */
static void
bnad_lock_init(struct bnad *bnad)
{
	spin_lock_init(&bnad->bna_lock);
	mutex_init(&bnad->conf_mutex);
	mutex_init(&bnad_list_mutex);
}

static void
bnad_lock_uninit(struct bnad *bnad)
{
	mutex_destroy(&bnad->conf_mutex);
	mutex_destroy(&bnad_list_mutex);
}
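
/*
 * DMA configuration tries a 64-bit mask first (streaming and
 * coherent) and reports the outcome through *using_dac; only if that
 * fails does it fall back to a 32-bit mask before giving up on the
 * device entirely.
 */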
/* PCI Initialization */
static int
bnad_pci_init(struct bnad *bnad,
	      struct pci_dev *pdev, bool *using_dac)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, BNAD_NAME);
	if (err)
		goto disable_device;
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err)
				goto release_regions;
		}
		*using_dac = false;
	}
	pci_set_master(pdev);
	return 0;

release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);

	return err;
}

static void
bnad_pci_uninit(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
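
/*
 * Probe ordering is deliberate: firmware buffer, netdev allocation,
 * PCI/BAR setup, BNA resource allocation, MSI-X and mailbox IRQ, IOC
 * timers, IOC enable, and only then register_netdev(), so the
 * interface never becomes visible before the hardware is usable.
 * The error labels at the bottom unwind in exactly the reverse order.
 */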
static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *pcidev_id)
{
	bool using_dac;
	int err;
	struct bnad *bnad;
	struct bna *bna;
	struct net_device *netdev;
	struct bfa_pcidev pcidev_info;
	unsigned long flags;

	pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
	       pdev, pcidev_id, PCI_FUNC(pdev->devfn));

	mutex_lock(&bnad_fwimg_mutex);
	if (!cna_get_firmware_buf(pdev)) {
		mutex_unlock(&bnad_fwimg_mutex);
		pr_warn("Failed to load Firmware Image!\n");
		return -ENODEV;
	}
	mutex_unlock(&bnad_fwimg_mutex);

	/*
	 * Allocates sizeof(struct net_device + struct bnad)
	 * bnad = netdev->priv
	 */
	netdev = alloc_etherdev(sizeof(struct bnad));
	if (!netdev) {
		err = -ENOMEM;
		return err;
	}
	bnad = netdev_priv(netdev);
	bnad_lock_init(bnad);
	bnad_add_to_list(bnad);

	mutex_lock(&bnad->conf_mutex);
	/*
	 * PCI initialization
	 *	Output : using_dac = 1 for 64 bit DMA
	 *			   = 0 for 32 bit DMA
	 */
	err = bnad_pci_init(bnad, pdev, &using_dac);
	if (err)
		goto unlock_mutex;

	/*
	 * Initialize bnad structure
	 * Setup relation between pci_dev & netdev
	 * Init Tx free tasklet
	 */
	err = bnad_init(bnad, pdev, netdev);
	if (err)
		goto pci_uninit;

	/* Initialize netdev structure, set up ethtool ops */
	bnad_netdev_init(bnad, using_dac);

	/* Set link to down state */
	netif_carrier_off(netdev);

	/* Setup the debugfs node for this bnad */
	if (bna_debugfs_enable)
		bnad_debugfs_init(bnad);

	/* Get resource requirement from bna */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_res_req(&bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Allocate resources from bna */
	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	if (err)
		goto drv_uninit;

	bna = &bnad->bna;

	/* Setup pcidev_info for bna_init() */
	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
	pcidev_info.device_id = bnad->pcidev->device;
	pcidev_info.pci_bar_kva = bnad->bar0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;

	bnad_enable_msix(bnad);
	err = bnad_mbox_irq_alloc(bnad);
	if (err)
		goto res_free;

	/* Set up timers */
	setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
				((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
				((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
				((unsigned long)bnad));
	setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
				((unsigned long)bnad));

	/* Now start the timer before calling IOC */
	mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
		  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));

	/*
	 * Start the chip
	 * If the call back comes with error, we bail out.
	 * This is a catastrophic error.
	 */
	err = bnad_ioceth_enable(bnad);
	if (err) {
		pr_err("BNA: Initialization failed err=%d\n",
		       err);
		goto probe_success;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
		bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
			bna_attr(bna)->num_rxp - 1);
		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
			bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
			err = -EIO;
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (err)
		goto disable_ioceth;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	if (err) {
		err = -EIO;
		goto disable_ioceth;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Get the burnt-in mac */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
	bnad_set_netdev_perm_addr(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);

	/* Finally, register with net_device layer */
	err = register_netdev(netdev);
	if (err) {
		pr_err("BNA : Registering with netdev failed\n");
		goto probe_uninit;
	}
	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);

	return 0;

probe_success:
	mutex_unlock(&bnad->conf_mutex);
	return 0;

probe_uninit:
	mutex_lock(&bnad->conf_mutex);
	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
disable_ioceth:
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
res_free:
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
	/* Remove the debugfs node for this bnad */
	kfree(bnad->regdata);
	bnad_debugfs_uninit(bnad);
	bnad_uninit(bnad);
pci_uninit:
	bnad_pci_uninit(pdev);
unlock_mutex:
	mutex_unlock(&bnad->conf_mutex);
	bnad_remove_from_list(bnad);
	bnad_lock_uninit(bnad);
	free_netdev(netdev);
	return err;
}
static void __devexit
bnad_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnad *bnad;
	struct bna *bna;
	unsigned long flags;

	if (!netdev)
		return;

	pr_info("%s bnad_pci_remove\n", netdev->name);
	bnad = netdev_priv(netdev);
	bna = &bnad->bna;

	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
		unregister_netdev(netdev);

	mutex_lock(&bnad->conf_mutex);
	bnad_ioceth_disable(bnad);
	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_uninit(bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
	bnad_mbox_irq_free(bnad);
	bnad_disable_msix(bnad);
	bnad_pci_uninit(pdev);
	mutex_unlock(&bnad->conf_mutex);
	bnad_remove_from_list(bnad);
	bnad_lock_uninit(bnad);
	/* Remove the debugfs node for this bnad */
	kfree(bnad->regdata);
	bnad_debugfs_uninit(bnad);
	bnad_uninit(bnad);
	free_netdev(netdev);
}
static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			PCI_DEVICE_ID_BROCADE_CT),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask =  0xffff00
	},
	{
		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
			BFA_PCI_DEVICE_ID_CT2),
		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
		.class_mask =  0xffff00
	},
	{0,  },
};

MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
static struct pci_driver bnad_pci_driver = {
	.name = BNAD_NAME,
	.id_table = bnad_pci_id_table,
	.probe = bnad_pci_probe,
	.remove = __devexit_p(bnad_pci_remove),
};
static int __init
bnad_module_init(void)
{
	int err;

	pr_info("Brocade 10G Ethernet driver - version: %s\n",
			BNAD_VERSION);

	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);

	err = pci_register_driver(&bnad_pci_driver);
	if (err < 0) {
		pr_err("bna : PCI registration failed in module init "
		       "(%d)\n", err);
		return err;
	}

	return 0;
}

static void __exit
bnad_module_exit(void)
{
	pci_unregister_driver(&bnad_pci_driver);

	if (bfi_fw)
		release_firmware(bfi_fw);
}

module_init(bnad_module_init);
module_exit(bnad_module_exit);
MODULE_AUTHOR("Brocade");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
MODULE_VERSION(BNAD_VERSION);
MODULE_FIRMWARE(CNA_FW_FILE_CT);
MODULE_FIRMWARE(CNA_FW_FILE_CT2);