Commit | Line | Data |
---|---|---|
8b230ed8 | 1 | /* |
2732ba56 | 2 | * Linux network driver for QLogic BR-series Converged Network Adapter. |
8b230ed8 RM |
3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms of the GNU General Public License (GPL) Version 2 as | |
6 | * published by the Free Software Foundation | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but | |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
11 | * General Public License for more details. | |
12 | */ | |
13 | /* | |
2732ba56 RM |
14 | * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. |
15 | * Copyright (c) 2014-2015 QLogic Corporation | |
8b230ed8 | 16 | * All rights reserved |
2732ba56 | 17 | * www.qlogic.com |
8b230ed8 | 18 | */ |
f859d7cb | 19 | #include <linux/bitops.h> |
8b230ed8 RM |
20 | #include <linux/netdevice.h> |
21 | #include <linux/skbuff.h> | |
22 | #include <linux/etherdevice.h> | |
23 | #include <linux/in.h> | |
24 | #include <linux/ethtool.h> | |
25 | #include <linux/if_vlan.h> | |
26 | #include <linux/if_ether.h> | |
27 | #include <linux/ip.h> | |
70c71606 | 28 | #include <linux/prefetch.h> |
9d9779e7 | 29 | #include <linux/module.h> |
8b230ed8 RM |
30 | |
31 | #include "bnad.h" | |
32 | #include "bna.h" | |
33 | #include "cna.h" | |
34 | ||
b7ee31c5 | 35 | static DEFINE_MUTEX(bnad_fwimg_mutex); |
8b230ed8 RM |
36 | |
37 | /* | |
38 | * Module params | |
39 | */ | |
40 | static uint bnad_msix_disable; | |
41 | module_param(bnad_msix_disable, uint, 0444); | |
42 | MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode"); | |
43 | ||
44 | static uint bnad_ioc_auto_recover = 1; | |
45 | module_param(bnad_ioc_auto_recover, uint, 0444); | |
46 | MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery"); | |
47 | ||
7afc5dbd KG |
48 | static uint bna_debugfs_enable = 1; |
49 | module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR); | |
50 | MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1," | |
51 | " Range[false:0|true:1]"); | |
52 | ||
8b230ed8 RM |
53 | /* |
54 | * Global variables | |
55 | */ | |
482da0fa | 56 | static u32 bnad_rxqs_per_cq = 2; |
e1e0918f | 57 | static u32 bna_id; |
58 | static struct mutex bnad_list_mutex; | |
59 | static LIST_HEAD(bnad_list); | |
e2f9ecfc IV |
60 | static const u8 bnad_bcast_addr[] __aligned(2) = |
61 | { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; | |
8b230ed8 RM |
62 | |
63 | /* | |
64 | * Local MACROS | |
65 | */ | |
8b230ed8 RM |
66 | #define BNAD_GET_MBOX_IRQ(_bnad) \ |
67 | (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \ | |
8811e267 | 68 | ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \ |
8b230ed8 RM |
69 | ((_bnad)->pcidev->irq)) |
70 | ||
5216562a | 71 | #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \ |
8b230ed8 RM |
72 | do { \ |
73 | (_res_info)->res_type = BNA_RES_T_MEM; \ | |
74 | (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \ | |
75 | (_res_info)->res_u.mem_info.num = (_num); \ | |
5216562a | 76 | (_res_info)->res_u.mem_info.len = (_size); \ |
8b230ed8 RM |
77 | } while (0) |
78 | ||
72a9730b KG |
79 | static void |
80 | bnad_add_to_list(struct bnad *bnad) | |
81 | { | |
82 | mutex_lock(&bnad_list_mutex); | |
83 | list_add_tail(&bnad->list_entry, &bnad_list); | |
84 | bnad->id = bna_id++; | |
85 | mutex_unlock(&bnad_list_mutex); | |
86 | } | |
87 | ||
88 | static void | |
89 | bnad_remove_from_list(struct bnad *bnad) | |
90 | { | |
91 | mutex_lock(&bnad_list_mutex); | |
92 | list_del(&bnad->list_entry); | |
93 | mutex_unlock(&bnad_list_mutex); | |
94 | } | |
95 | ||
8b230ed8 RM |
96 | /* |
97 | * Reinitialize completions in CQ, once Rx is taken down | |
98 | */ | |
99 | static void | |
b3cc6e88 | 100 | bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb) |
8b230ed8 | 101 | { |
5216562a | 102 | struct bna_cq_entry *cmpl; |
8b230ed8 RM |
103 | int i; |
104 | ||
8b230ed8 | 105 | for (i = 0; i < ccb->q_depth; i++) { |
5216562a | 106 | cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i]; |
8b230ed8 | 107 | cmpl->valid = 0; |
8b230ed8 RM |
108 | } |
109 | } | |
110 | ||
5216562a RM |
111 | /* Tx Datapath functions */ |
112 | ||
113 | ||
114 | /* Caller should ensure that the entry at unmap_q[index] is valid */ | |
271e8b79 | 115 | static u32 |
5216562a RM |
116 | bnad_tx_buff_unmap(struct bnad *bnad, |
117 | struct bnad_tx_unmap *unmap_q, | |
118 | u32 q_depth, u32 index) | |
271e8b79 | 119 | { |
5216562a RM |
120 | struct bnad_tx_unmap *unmap; |
121 | struct sk_buff *skb; | |
122 | int vector, nvecs; | |
123 | ||
124 | unmap = &unmap_q[index]; | |
125 | nvecs = unmap->nvecs; | |
126 | ||
127 | skb = unmap->skb; | |
128 | unmap->skb = NULL; | |
129 | unmap->nvecs = 0; | |
130 | dma_unmap_single(&bnad->pcidev->dev, | |
131 | dma_unmap_addr(&unmap->vectors[0], dma_addr), | |
132 | skb_headlen(skb), DMA_TO_DEVICE); | |
133 | dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0); | |
134 | nvecs--; | |
135 | ||
136 | vector = 0; | |
137 | while (nvecs) { | |
138 | vector++; | |
139 | if (vector == BFI_TX_MAX_VECTORS_PER_WI) { | |
140 | vector = 0; | |
141 | BNA_QE_INDX_INC(index, q_depth); | |
142 | unmap = &unmap_q[index]; | |
143 | } | |
271e8b79 | 144 | |
5216562a RM |
145 | dma_unmap_page(&bnad->pcidev->dev, |
146 | dma_unmap_addr(&unmap->vectors[vector], dma_addr), | |
24f5d33d RM |
147 | dma_unmap_len(&unmap->vectors[vector], dma_len), |
148 | DMA_TO_DEVICE); | |
5216562a RM |
149 | dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0); |
150 | nvecs--; | |
271e8b79 RM |
151 | } |
152 | ||
5216562a RM |
153 | BNA_QE_INDX_INC(index, q_depth); |
154 | ||
271e8b79 RM |
155 | return index; |
156 | } | |
157 | ||
8b230ed8 RM |
158 | /* |
159 | * Frees all pending Tx Bufs | |
160 | * At this point no activity is expected on the Q, | |
161 | * so DMA unmap & freeing is fine. | |
162 | */ | |
163 | static void | |
5216562a | 164 | bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb) |
8b230ed8 | 165 | { |
5216562a RM |
166 | struct bnad_tx_unmap *unmap_q = tcb->unmap_q; |
167 | struct sk_buff *skb; | |
168 | int i; | |
8b230ed8 | 169 | |
5216562a RM |
170 | for (i = 0; i < tcb->q_depth; i++) { |
171 | skb = unmap_q[i].skb; | |
938fa488 | 172 | if (!skb) |
8b230ed8 | 173 | continue; |
5216562a | 174 | bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); |
938fa488 | 175 | |
8b230ed8 RM |
176 | dev_kfree_skb_any(skb); |
177 | } | |
178 | } | |
179 | ||
8b230ed8 | 180 | /* |
b3cc6e88 | 181 | * bnad_txcmpl_process : Frees the Tx bufs on Tx completion |
8b230ed8 RM |
182 | * Can be called in a) Interrupt context |
183 | * b) Sending context | |
8b230ed8 RM |
184 | */ |
185 | static u32 | |
5216562a | 186 | bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) |
8b230ed8 | 187 | { |
5216562a RM |
188 | u32 sent_packets = 0, sent_bytes = 0; |
189 | u32 wis, unmap_wis, hw_cons, cons, q_depth; | |
190 | struct bnad_tx_unmap *unmap_q = tcb->unmap_q; | |
191 | struct bnad_tx_unmap *unmap; | |
192 | struct sk_buff *skb; | |
8b230ed8 | 193 | |
d95d1081 | 194 | /* Just return if TX is stopped */ |
be7fa326 | 195 | if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) |
8b230ed8 RM |
196 | return 0; |
197 | ||
5216562a RM |
198 | hw_cons = *(tcb->hw_consumer_index); |
199 | cons = tcb->consumer_index; | |
200 | q_depth = tcb->q_depth; | |
8b230ed8 | 201 | |
5216562a | 202 | wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth); |
8b230ed8 RM |
203 | BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth))); |
204 | ||
8b230ed8 | 205 | while (wis) { |
5216562a RM |
206 | unmap = &unmap_q[cons]; |
207 | ||
208 | skb = unmap->skb; | |
8b230ed8 | 209 | |
8b230ed8 RM |
210 | sent_packets++; |
211 | sent_bytes += skb->len; | |
8b230ed8 | 212 | |
5216562a RM |
213 | unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs); |
214 | wis -= unmap_wis; | |
8b230ed8 | 215 | |
5216562a | 216 | cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons); |
8b230ed8 RM |
217 | dev_kfree_skb_any(skb); |
218 | } | |
219 | ||
220 | /* Update consumer pointers. */ | |
5216562a | 221 | tcb->consumer_index = hw_cons; |
8b230ed8 RM |
222 | |
223 | tcb->txq->tx_packets += sent_packets; | |
224 | tcb->txq->tx_bytes += sent_bytes; | |
225 | ||
226 | return sent_packets; | |
227 | } | |
228 | ||
8b230ed8 | 229 | static u32 |
b3cc6e88 | 230 | bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb) |
8b230ed8 RM |
231 | { |
232 | struct net_device *netdev = bnad->netdev; | |
be7fa326 | 233 | u32 sent = 0; |
8b230ed8 RM |
234 | |
235 | if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) | |
236 | return 0; | |
237 | ||
b3cc6e88 | 238 | sent = bnad_txcmpl_process(bnad, tcb); |
8b230ed8 RM |
239 | if (sent) { |
240 | if (netif_queue_stopped(netdev) && | |
241 | netif_carrier_ok(netdev) && | |
242 | BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= | |
243 | BNAD_NETIF_WAKE_THRESHOLD) { | |
be7fa326 RM |
244 | if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) { |
245 | netif_wake_queue(netdev); | |
246 | BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); | |
247 | } | |
8b230ed8 | 248 | } |
be7fa326 RM |
249 | } |
250 | ||
251 | if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) | |
8b230ed8 | 252 | bna_ib_ack(tcb->i_dbell, sent); |
8b230ed8 | 253 | |
4e857c58 | 254 | smp_mb__before_atomic(); |
8b230ed8 RM |
255 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); |
256 | ||
257 | return sent; | |
258 | } | |
259 | ||
260 | /* MSIX Tx Completion Handler */ | |
261 | static irqreturn_t | |
262 | bnad_msix_tx(int irq, void *data) | |
263 | { | |
264 | struct bna_tcb *tcb = (struct bna_tcb *)data; | |
265 | struct bnad *bnad = tcb->bnad; | |
266 | ||
b3cc6e88 | 267 | bnad_tx_complete(bnad, tcb); |
8b230ed8 RM |
268 | |
269 | return IRQ_HANDLED; | |
270 | } | |
271 | ||
30f9fc94 RM |
272 | static inline void |
273 | bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb) | |
274 | { | |
275 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; | |
276 | ||
277 | unmap_q->reuse_pi = -1; | |
278 | unmap_q->alloc_order = -1; | |
279 | unmap_q->map_size = 0; | |
280 | unmap_q->type = BNAD_RXBUF_NONE; | |
281 | } | |
282 | ||
283 | /* Default is page-based allocation. Multi-buffer support - TBD */ | |
284 | static int | |
285 | bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb) | |
286 | { | |
287 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; | |
e29aa339 | 288 | int order; |
30f9fc94 RM |
289 | |
290 | bnad_rxq_alloc_uninit(bnad, rcb); | |
291 | ||
e29aa339 RM |
292 | order = get_order(rcb->rxq->buffer_size); |
293 | ||
294 | unmap_q->type = BNAD_RXBUF_PAGE; | |
30f9fc94 RM |
295 | |
296 | if (bna_is_small_rxq(rcb->id)) { | |
297 | unmap_q->alloc_order = 0; | |
298 | unmap_q->map_size = rcb->rxq->buffer_size; | |
299 | } else { | |
e29aa339 RM |
300 | if (rcb->rxq->multi_buffer) { |
301 | unmap_q->alloc_order = 0; | |
302 | unmap_q->map_size = rcb->rxq->buffer_size; | |
303 | unmap_q->type = BNAD_RXBUF_MULTI_BUFF; | |
304 | } else { | |
305 | unmap_q->alloc_order = order; | |
306 | unmap_q->map_size = | |
307 | (rcb->rxq->buffer_size > 2048) ? | |
308 | PAGE_SIZE << order : 2048; | |
309 | } | |
30f9fc94 RM |
310 | } |
311 | ||
ebb56d37 | 312 | BUG_ON((PAGE_SIZE << order) % unmap_q->map_size); |
30f9fc94 | 313 | |
30f9fc94 RM |
314 | return 0; |
315 | } | |
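
bnad_rxq_alloc_init() carves one higher-order page allocation into several fixed-size Rx buffers: get_order() picks the allocation size and map_size decides the slice. A stand-alone sketch of that arithmetic, assuming 4 KiB pages; order_for() is a hypothetical stand-in for the kernel's get_order().

```c
#include <stdio.h>

#define PAGE_SZ 4096u

/* smallest order such that (PAGE_SZ << order) >= size */
static unsigned int order_for(unsigned int size)
{
	unsigned int order = 0;

	while ((PAGE_SZ << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned int sizes[] = { 2048, 4096, 9000 };	/* example Rx buffer sizes */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int order = order_for(sizes[i]);
		unsigned int alloc = PAGE_SZ << order;
		/* same rule as above: buffers <= 2048 share a page in 2 KiB slices */
		unsigned int map_size = sizes[i] > 2048 ? alloc : 2048;

		printf("buffer %u -> order %u, alloc %u, %u buffer(s) per alloc\n",
		       sizes[i], order, alloc, alloc / map_size);
	}
	return 0;
}
```
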
316 | ||
317 | static inline void | |
318 | bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap) | |
319 | { | |
320 | if (!unmap->page) | |
321 | return; | |
322 | ||
323 | dma_unmap_page(&bnad->pcidev->dev, | |
324 | dma_unmap_addr(&unmap->vector, dma_addr), | |
325 | unmap->vector.len, DMA_FROM_DEVICE); | |
326 | put_page(unmap->page); | |
327 | unmap->page = NULL; | |
328 | dma_unmap_addr_set(&unmap->vector, dma_addr, 0); | |
329 | unmap->vector.len = 0; | |
330 | } | |
331 | ||
332 | static inline void | |
333 | bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap) | |
334 | { | |
335 | if (!unmap->skb) | |
336 | return; | |
337 | ||
338 | dma_unmap_single(&bnad->pcidev->dev, | |
339 | dma_unmap_addr(&unmap->vector, dma_addr), | |
340 | unmap->vector.len, DMA_FROM_DEVICE); | |
341 | dev_kfree_skb_any(unmap->skb); | |
342 | unmap->skb = NULL; | |
343 | dma_unmap_addr_set(&unmap->vector, dma_addr, 0); | |
344 | unmap->vector.len = 0; | |
345 | } | |
346 | ||
8b230ed8 | 347 | static void |
b3cc6e88 | 348 | bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb) |
8b230ed8 | 349 | { |
30f9fc94 | 350 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; |
5216562a RM |
351 | int i; |
352 | ||
353 | for (i = 0; i < rcb->q_depth; i++) { | |
30f9fc94 | 354 | struct bnad_rx_unmap *unmap = &unmap_q->unmap[i]; |
8b230ed8 | 355 | |
e29aa339 | 356 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) |
30f9fc94 | 357 | bnad_rxq_cleanup_skb(bnad, unmap); |
e29aa339 RM |
358 | else |
359 | bnad_rxq_cleanup_page(bnad, unmap); | |
30f9fc94 RM |
360 | } |
361 | bnad_rxq_alloc_uninit(bnad, rcb); | |
362 | } | |
5216562a | 363 | |
30f9fc94 RM |
364 | static u32 |
365 | bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) | |
366 | { | |
367 | u32 alloced, prod, q_depth; | |
368 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; | |
369 | struct bnad_rx_unmap *unmap, *prev; | |
370 | struct bna_rxq_entry *rxent; | |
371 | struct page *page; | |
372 | u32 page_offset, alloc_size; | |
373 | dma_addr_t dma_addr; | |
374 | ||
375 | prod = rcb->producer_index; | |
376 | q_depth = rcb->q_depth; | |
377 | ||
378 | alloc_size = PAGE_SIZE << unmap_q->alloc_order; | |
379 | alloced = 0; | |
380 | ||
381 | while (nalloc--) { | |
382 | unmap = &unmap_q->unmap[prod]; | |
383 | ||
384 | if (unmap_q->reuse_pi < 0) { | |
385 | page = alloc_pages(GFP_ATOMIC | __GFP_COMP, | |
386 | unmap_q->alloc_order); | |
387 | page_offset = 0; | |
388 | } else { | |
389 | prev = &unmap_q->unmap[unmap_q->reuse_pi]; | |
390 | page = prev->page; | |
391 | page_offset = prev->page_offset + unmap_q->map_size; | |
392 | get_page(page); | |
393 | } | |
394 | ||
395 | if (unlikely(!page)) { | |
396 | BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); | |
397 | rcb->rxq->rxbuf_alloc_failed++; | |
398 | goto finishing; | |
399 | } | |
400 | ||
401 | dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, | |
ba5ca784 IV |
402 | unmap_q->map_size, DMA_FROM_DEVICE); |
403 | if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { | |
404 | put_page(page); | |
405 | BNAD_UPDATE_CTR(bnad, rxbuf_map_failed); | |
406 | rcb->rxq->rxbuf_map_failed++; | |
407 | goto finishing; | |
408 | } | |
30f9fc94 RM |
409 | |
410 | unmap->page = page; | |
411 | unmap->page_offset = page_offset; | |
412 | dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); | |
413 | unmap->vector.len = unmap_q->map_size; | |
414 | page_offset += unmap_q->map_size; | |
415 | ||
416 | if (page_offset < alloc_size) | |
417 | unmap_q->reuse_pi = prod; | |
418 | else | |
419 | unmap_q->reuse_pi = -1; | |
420 | ||
421 | rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; | |
422 | BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); | |
423 | BNA_QE_INDX_INC(prod, q_depth); | |
424 | alloced++; | |
425 | } | |
426 | ||
427 | finishing: | |
428 | if (likely(alloced)) { | |
429 | rcb->producer_index = prod; | |
430 | smp_mb(); | |
431 | if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) | |
432 | bna_rxq_prod_indx_doorbell(rcb); | |
8b230ed8 | 433 | } |
30f9fc94 RM |
434 | |
435 | return alloced; | |
8b230ed8 RM |
436 | } |
437 | ||
30f9fc94 RM |
438 | static u32 |
439 | bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) | |
8b230ed8 | 440 | { |
30f9fc94 RM |
441 | u32 alloced, prod, q_depth, buff_sz; |
442 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; | |
5216562a | 443 | struct bnad_rx_unmap *unmap; |
8b230ed8 RM |
444 | struct bna_rxq_entry *rxent; |
445 | struct sk_buff *skb; | |
446 | dma_addr_t dma_addr; | |
447 | ||
5216562a | 448 | buff_sz = rcb->rxq->buffer_size; |
5216562a RM |
449 | prod = rcb->producer_index; |
450 | q_depth = rcb->q_depth; | |
8b230ed8 | 451 | |
30f9fc94 RM |
452 | alloced = 0; |
453 | while (nalloc--) { | |
454 | unmap = &unmap_q->unmap[prod]; | |
455 | ||
456 | skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz); | |
457 | ||
8b230ed8 RM |
458 | if (unlikely(!skb)) { |
459 | BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); | |
3caa1e95 | 460 | rcb->rxq->rxbuf_alloc_failed++; |
8b230ed8 RM |
461 | goto finishing; |
462 | } | |
ba5ca784 | 463 | |
5ea74318 | 464 | dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, |
5216562a | 465 | buff_sz, DMA_FROM_DEVICE); |
ba5ca784 IV |
466 | if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { |
467 | dev_kfree_skb_any(skb); | |
468 | BNAD_UPDATE_CTR(bnad, rxbuf_map_failed); | |
469 | rcb->rxq->rxbuf_map_failed++; | |
470 | goto finishing; | |
471 | } | |
8b230ed8 | 472 | |
5216562a RM |
473 | unmap->skb = skb; |
474 | dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); | |
475 | unmap->vector.len = buff_sz; | |
30f9fc94 RM |
476 | |
477 | rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; | |
478 | BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); | |
5216562a | 479 | BNA_QE_INDX_INC(prod, q_depth); |
8b230ed8 RM |
480 | alloced++; |
481 | } | |
482 | ||
483 | finishing: | |
484 | if (likely(alloced)) { | |
5216562a | 485 | rcb->producer_index = prod; |
8b230ed8 | 486 | smp_mb(); |
5bcf6ac0 | 487 | if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) |
be7fa326 | 488 | bna_rxq_prod_indx_doorbell(rcb); |
8b230ed8 | 489 | } |
30f9fc94 RM |
490 | |
491 | return alloced; | |
492 | } | |
493 | ||
494 | static inline void | |
495 | bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) | |
496 | { | |
497 | struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; | |
498 | u32 to_alloc; | |
499 | ||
500 | to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth); | |
501 | if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) | |
502 | return; | |
503 | ||
e29aa339 | 504 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) |
30f9fc94 | 505 | bnad_rxq_refill_skb(bnad, rcb, to_alloc); |
e29aa339 RM |
506 | else |
507 | bnad_rxq_refill_page(bnad, rcb, to_alloc); | |
8b230ed8 RM |
508 | } |
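
bnad_rxq_post() only refills when the number of empty slots clears a threshold, expressed as a right shift of the free count. A small illustration of that test; the shift value here is an example, not the driver's BNAD_RXQ_REFILL_THRESHOLD_SHIFT.

```c
#include <stdbool.h>
#include <stdio.h>

#define REFILL_THRESHOLD_SHIFT 3	/* example: refill only once >= 8 slots are free */

static bool should_refill(unsigned int free_slots)
{
	/* non-zero only when free_slots >= (1 << REFILL_THRESHOLD_SHIFT) */
	return (free_slots >> REFILL_THRESHOLD_SHIFT) != 0;
}

int main(void)
{
	printf("%d %d\n", should_refill(7), should_refill(8));	/* 0 1 */
	return 0;
}
```
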
509 | ||
5e46631f RM |
510 | #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ |
511 | BNA_CQ_EF_IPV6 | \ | |
512 | BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \ | |
513 | BNA_CQ_EF_L4_CKSUM_OK) | |
514 | ||
515 | #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ | |
516 | BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK) | |
517 | #define flags_tcp6 (BNA_CQ_EF_IPV6 | \ | |
518 | BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK) | |
519 | #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ | |
520 | BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) | |
521 | #define flags_udp6 (BNA_CQ_EF_IPV6 | \ | |
522 | BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) | |
523 | ||
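
The masks above let the Rx path decide CHECKSUM_UNNECESSARY with one AND plus a few equality tests: the completion flags are masked down to the protocol/checksum bits and compared against the exact "good" combinations. A stand-alone sketch with hypothetical flag values (not the BNA_CQ_EF_* constants):

```c
#include <stdbool.h>
#include <stdio.h>

/* hypothetical flag bits, for illustration only */
enum {
	F_IPV4     = 1 << 0,
	F_IPV6     = 1 << 1,
	F_TCP      = 1 << 2,
	F_UDP      = 1 << 3,
	F_L3_CK_OK = 1 << 4,
	F_L4_CK_OK = 1 << 5,
	F_VLAN     = 1 << 6,	/* unrelated bit, must not affect the test */
};

#define CKSUM_PROT_MASK	(F_IPV4 | F_IPV6 | F_TCP | F_UDP | F_L3_CK_OK | F_L4_CK_OK)
#define GOOD_TCP4	(F_IPV4 | F_L3_CK_OK | F_TCP | F_L4_CK_OK)
#define GOOD_UDP6	(F_IPV6 | F_UDP | F_L4_CK_OK)

static bool csum_verified(unsigned int flags)
{
	unsigned int masked = flags & CKSUM_PROT_MASK;

	/* the driver also checks the tcp6/udp4 combinations the same way */
	return masked == GOOD_TCP4 || masked == GOOD_UDP6;
}

int main(void)
{
	printf("%d\n", csum_verified(GOOD_TCP4 | F_VLAN));	/* 1: VLAN bit ignored */
	printf("%d\n", csum_verified(F_IPV4 | F_TCP));		/* 0: no checksum-ok bits */
	return 0;
}
```
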
e29aa339 RM |
524 | static void |
525 | bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb, | |
526 | u32 sop_ci, u32 nvecs) | |
30f9fc94 | 527 | { |
e29aa339 RM |
528 | struct bnad_rx_unmap_q *unmap_q; |
529 | struct bnad_rx_unmap *unmap; | |
530 | u32 ci, vec; | |
30f9fc94 | 531 | |
e29aa339 RM |
532 | unmap_q = rcb->unmap_q; |
533 | for (vec = 0, ci = sop_ci; vec < nvecs; vec++) { | |
534 | unmap = &unmap_q->unmap[ci]; | |
535 | BNA_QE_INDX_INC(ci, rcb->q_depth); | |
536 | ||
537 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) | |
538 | bnad_rxq_cleanup_skb(bnad, unmap); | |
539 | else | |
540 | bnad_rxq_cleanup_page(bnad, unmap); | |
541 | } | |
542 | } | |
543 | ||
544 | static void | |
6c3f5aef | 545 | bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs) |
e29aa339 | 546 | { |
6c3f5aef | 547 | struct bna_rcb *rcb; |
e29aa339 | 548 | struct bnad *bnad; |
e29aa339 | 549 | struct bnad_rx_unmap_q *unmap_q; |
6c3f5aef IV |
550 | struct bna_cq_entry *cq, *cmpl; |
551 | u32 ci, pi, totlen = 0; | |
552 | ||
553 | cq = ccb->sw_q; | |
554 | pi = ccb->producer_index; | |
555 | cmpl = &cq[pi]; | |
e29aa339 | 556 | |
6c3f5aef | 557 | rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0]; |
e29aa339 RM |
558 | unmap_q = rcb->unmap_q; |
559 | bnad = rcb->bnad; | |
6c3f5aef | 560 | ci = rcb->consumer_index; |
66f9513a RM |
561 | |
562 | /* prefetch header */ | |
6c3f5aef IV |
563 | prefetch(page_address(unmap_q->unmap[ci].page) + |
564 | unmap_q->unmap[ci].page_offset); | |
565 | ||
566 | while (nvecs--) { | |
567 | struct bnad_rx_unmap *unmap; | |
568 | u32 len; | |
66f9513a | 569 | |
e29aa339 RM |
570 | unmap = &unmap_q->unmap[ci]; |
571 | BNA_QE_INDX_INC(ci, rcb->q_depth); | |
30f9fc94 RM |
572 | |
573 | dma_unmap_page(&bnad->pcidev->dev, | |
6c3f5aef IV |
574 | dma_unmap_addr(&unmap->vector, dma_addr), |
575 | unmap->vector.len, DMA_FROM_DEVICE); | |
e29aa339 | 576 | |
6c3f5aef | 577 | len = ntohs(cmpl->length); |
f2d9da1a | 578 | skb->truesize += unmap->vector.len; |
e29aa339 RM |
579 | totlen += len; |
580 | ||
30f9fc94 | 581 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, |
6c3f5aef | 582 | unmap->page, unmap->page_offset, len); |
30f9fc94 RM |
583 | |
584 | unmap->page = NULL; | |
585 | unmap->vector.len = 0; | |
6c3f5aef IV |
586 | |
587 | BNA_QE_INDX_INC(pi, ccb->q_depth); | |
588 | cmpl = &cq[pi]; | |
30f9fc94 RM |
589 | } |
590 | ||
e29aa339 RM |
591 | skb->len += totlen; |
592 | skb->data_len += totlen; | |
e29aa339 RM |
593 | } |
594 | ||
595 | static inline void | |
596 | bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb, | |
597 | struct bnad_rx_unmap *unmap, u32 len) | |
598 | { | |
599 | prefetch(skb->data); | |
30f9fc94 RM |
600 | |
601 | dma_unmap_single(&bnad->pcidev->dev, | |
602 | dma_unmap_addr(&unmap->vector, dma_addr), | |
603 | unmap->vector.len, DMA_FROM_DEVICE); | |
604 | ||
e29aa339 | 605 | skb_put(skb, len); |
30f9fc94 RM |
606 | skb->protocol = eth_type_trans(skb, bnad->netdev); |
607 | ||
608 | unmap->skb = NULL; | |
609 | unmap->vector.len = 0; | |
30f9fc94 RM |
610 | } |
611 | ||
8b230ed8 | 612 | static u32 |
b3cc6e88 | 613 | bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) |
8b230ed8 | 614 | { |
e29aa339 | 615 | struct bna_cq_entry *cq, *cmpl, *next_cmpl; |
8b230ed8 | 616 | struct bna_rcb *rcb = NULL; |
30f9fc94 | 617 | struct bnad_rx_unmap_q *unmap_q; |
e29aa339 RM |
618 | struct bnad_rx_unmap *unmap = NULL; |
619 | struct sk_buff *skb = NULL; | |
8b230ed8 | 620 | struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; |
30f9fc94 | 621 | struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl; |
e29aa339 RM |
622 | u32 packets = 0, len = 0, totlen = 0; |
623 | u32 pi, vec, sop_ci = 0, nvecs = 0; | |
624 | u32 flags, masked_flags; | |
078086f3 | 625 | |
8b230ed8 | 626 | prefetch(bnad->netdev); |
5216562a RM |
627 | |
628 | cq = ccb->sw_q; | |
5216562a | 629 | |
17a30a14 | 630 | while (packets < budget) { |
c36c9d50 | 631 | cmpl = &cq[ccb->producer_index]; |
17a30a14 RM |
632 | if (!cmpl->valid) |
633 | break; | |
634 | /* The 'valid' field is set by the adapter, only after writing | |
635 | * the other fields of completion entry. Hence, do not load | |
636 | * other fields of completion entry *before* the 'valid' is | |
637 | * loaded. Adding the rmb() here prevents the compiler and/or | |
638 | * CPU from reordering the reads which would potentially result | |
639 | * in reading stale values in completion entry. | |
640 | */ | |
641 | rmb(); | |
642 | ||
8b230ed8 RM |
643 | BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length)); |
644 | ||
078086f3 | 645 | if (bna_is_small_rxq(cmpl->rxq_id)) |
8b230ed8 | 646 | rcb = ccb->rcb[1]; |
078086f3 RM |
647 | else |
648 | rcb = ccb->rcb[0]; | |
8b230ed8 RM |
649 | |
650 | unmap_q = rcb->unmap_q; | |
651 | ||
e29aa339 RM |
652 | /* start of packet ci */ |
653 | sop_ci = rcb->consumer_index; | |
654 | ||
655 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) { | |
656 | unmap = &unmap_q->unmap[sop_ci]; | |
657 | skb = unmap->skb; | |
658 | } else { | |
659 | skb = napi_get_frags(&rx_ctrl->napi); | |
660 | if (unlikely(!skb)) | |
661 | break; | |
662 | } | |
663 | prefetch(skb); | |
664 | ||
665 | flags = ntohl(cmpl->flags); | |
666 | len = ntohs(cmpl->length); | |
667 | totlen = len; | |
668 | nvecs = 1; | |
669 | ||
670 | /* Check all the completions for this frame. | |
671 | * busy-wait doesn't help much, break here. | |
672 | */ | |
673 | if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) && | |
674 | (flags & BNA_CQ_EF_EOP) == 0) { | |
675 | pi = ccb->producer_index; | |
676 | do { | |
677 | BNA_QE_INDX_INC(pi, ccb->q_depth); | |
678 | next_cmpl = &cq[pi]; | |
679 | ||
680 | if (!next_cmpl->valid) | |
681 | break; | |
17a30a14 RM |
682 | /* The 'valid' field is set by the adapter, only |
683 | * after writing the other fields of completion | |
684 | * entry. Hence, do not load other fields of | |
685 | * completion entry *before* the 'valid' is | |
686 | * loaded. Adding the rmb() here prevents the | |
687 | * compiler and/or CPU from reordering the reads | |
688 | * which would potentially result in reading | |
689 | * stale values in completion entry. | |
690 | */ | |
691 | rmb(); | |
5216562a | 692 | |
e29aa339 RM |
693 | len = ntohs(next_cmpl->length); |
694 | flags = ntohl(next_cmpl->flags); | |
695 | ||
696 | nvecs++; | |
697 | totlen += len; | |
698 | } while ((flags & BNA_CQ_EF_EOP) == 0); | |
699 | ||
700 | if (!next_cmpl->valid) | |
701 | break; | |
702 | } | |
ade4dc3e | 703 | packets++; |
e29aa339 RM |
704 | |
705 | /* TODO: BNA_CQ_EF_LOCAL ? */ | |
706 | if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | | |
707 | BNA_CQ_EF_FCS_ERROR | | |
708 | BNA_CQ_EF_TOO_LONG))) { | |
709 | bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs); | |
8b230ed8 | 710 | rcb->rxq->rx_packets_with_error++; |
e29aa339 | 711 | |
8b230ed8 RM |
712 | goto next; |
713 | } | |
714 | ||
e29aa339 RM |
715 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) |
716 | bnad_cq_setup_skb(bnad, skb, unmap, len); | |
717 | else | |
6c3f5aef | 718 | bnad_cq_setup_skb_frags(ccb, skb, nvecs); |
30f9fc94 | 719 | |
e29aa339 RM |
720 | rcb->rxq->rx_packets++; |
721 | rcb->rxq->rx_bytes += totlen; | |
722 | ccb->bytes_per_intr += totlen; | |
5e46631f RM |
723 | |
724 | masked_flags = flags & flags_cksum_prot_mask; | |
725 | ||
8b230ed8 | 726 | if (likely |
e5ee20e7 | 727 | ((bnad->netdev->features & NETIF_F_RXCSUM) && |
5e46631f RM |
728 | ((masked_flags == flags_tcp4) || |
729 | (masked_flags == flags_udp4) || | |
730 | (masked_flags == flags_tcp6) || | |
731 | (masked_flags == flags_udp6)))) | |
8b230ed8 RM |
732 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
733 | else | |
bc8acf2c | 734 | skb_checksum_none_assert(skb); |
8b230ed8 | 735 | |
877767dc IV |
736 | if ((flags & BNA_CQ_EF_VLAN) && |
737 | (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) | |
86a9bad3 | 738 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); |
f859d7cb | 739 | |
e29aa339 | 740 | if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) |
f859d7cb | 741 | netif_receive_skb(skb); |
e29aa339 RM |
742 | else |
743 | napi_gro_frags(&rx_ctrl->napi); | |
8b230ed8 RM |
744 | |
745 | next: | |
e29aa339 RM |
746 | BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth); |
747 | for (vec = 0; vec < nvecs; vec++) { | |
748 | cmpl = &cq[ccb->producer_index]; | |
749 | cmpl->valid = 0; | |
750 | BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth); | |
751 | } | |
8b230ed8 RM |
752 | } |
753 | ||
30f9fc94 | 754 | napi_gro_flush(&rx_ctrl->napi, false); |
2be67144 | 755 | if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) |
271e8b79 RM |
756 | bna_ib_ack_disable_irq(ccb->i_dbell, packets); |
757 | ||
5216562a | 758 | bnad_rxq_post(bnad, ccb->rcb[0]); |
2be67144 | 759 | if (ccb->rcb[1]) |
5216562a | 760 | bnad_rxq_post(bnad, ccb->rcb[1]); |
078086f3 | 761 | |
8b230ed8 RM |
762 | return packets; |
763 | } | |
764 | ||
8b230ed8 RM |
765 | static void |
766 | bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb) | |
767 | { | |
768 | struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); | |
be7fa326 RM |
769 | struct napi_struct *napi = &rx_ctrl->napi; |
770 | ||
771 | if (likely(napi_schedule_prep(napi))) { | |
be7fa326 | 772 | __napi_schedule(napi); |
271e8b79 | 773 | rx_ctrl->rx_schedule++; |
8b230ed8 | 774 | } |
8b230ed8 RM |
775 | } |
776 | ||
777 | /* MSIX Rx Path Handler */ | |
778 | static irqreturn_t | |
779 | bnad_msix_rx(int irq, void *data) | |
780 | { | |
781 | struct bna_ccb *ccb = (struct bna_ccb *)data; | |
8b230ed8 | 782 | |
271e8b79 | 783 | if (ccb) { |
ebb56d37 | 784 | ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++; |
2be67144 | 785 | bnad_netif_rx_schedule_poll(ccb->bnad, ccb); |
271e8b79 | 786 | } |
8b230ed8 RM |
787 | |
788 | return IRQ_HANDLED; | |
789 | } | |
790 | ||
791 | /* Interrupt handlers */ | |
792 | ||
793 | /* Mbox Interrupt Handlers */ | |
794 | static irqreturn_t | |
795 | bnad_msix_mbox_handler(int irq, void *data) | |
796 | { | |
797 | u32 intr_status; | |
e2fa6f2e | 798 | unsigned long flags; |
be7fa326 | 799 | struct bnad *bnad = (struct bnad *)data; |
8b230ed8 | 800 | |
8b230ed8 | 801 | spin_lock_irqsave(&bnad->bna_lock, flags); |
dfee325a RM |
802 | if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { |
803 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
804 | return IRQ_HANDLED; | |
805 | } | |
8b230ed8 RM |
806 | |
807 | bna_intr_status_get(&bnad->bna, intr_status); | |
808 | ||
078086f3 | 809 | if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) |
8b230ed8 RM |
810 | bna_mbox_handler(&bnad->bna, intr_status); |
811 | ||
812 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
813 | ||
8b230ed8 RM |
814 | return IRQ_HANDLED; |
815 | } | |
816 | ||
817 | static irqreturn_t | |
818 | bnad_isr(int irq, void *data) | |
819 | { | |
820 | int i, j; | |
821 | u32 intr_status; | |
822 | unsigned long flags; | |
be7fa326 | 823 | struct bnad *bnad = (struct bnad *)data; |
8b230ed8 RM |
824 | struct bnad_rx_info *rx_info; |
825 | struct bnad_rx_ctrl *rx_ctrl; | |
078086f3 | 826 | struct bna_tcb *tcb = NULL; |
8b230ed8 | 827 | |
dfee325a RM |
828 | spin_lock_irqsave(&bnad->bna_lock, flags); |
829 | if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { | |
830 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
e2fa6f2e | 831 | return IRQ_NONE; |
dfee325a | 832 | } |
8b230ed8 RM |
833 | |
834 | bna_intr_status_get(&bnad->bna, intr_status); | |
e2fa6f2e | 835 | |
dfee325a RM |
836 | if (unlikely(!intr_status)) { |
837 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
8b230ed8 | 838 | return IRQ_NONE; |
dfee325a | 839 | } |
8b230ed8 | 840 | |
078086f3 | 841 | if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) |
8b230ed8 | 842 | bna_mbox_handler(&bnad->bna, intr_status); |
be7fa326 | 843 | |
8b230ed8 RM |
844 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
845 | ||
be7fa326 RM |
846 | if (!BNA_IS_INTX_DATA_INTR(intr_status)) |
847 | return IRQ_HANDLED; | |
848 | ||
8b230ed8 | 849 | /* Process data interrupts */ |
be7fa326 RM |
850 | /* Tx processing */ |
851 | for (i = 0; i < bnad->num_tx; i++) { | |
078086f3 RM |
852 | for (j = 0; j < bnad->num_txq_per_tx; j++) { |
853 | tcb = bnad->tx_info[i].tcb[j]; | |
854 | if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) | |
b3cc6e88 | 855 | bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]); |
078086f3 | 856 | } |
be7fa326 RM |
857 | } |
858 | /* Rx processing */ | |
8b230ed8 RM |
859 | for (i = 0; i < bnad->num_rx; i++) { |
860 | rx_info = &bnad->rx_info[i]; | |
861 | if (!rx_info->rx) | |
862 | continue; | |
863 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | |
864 | rx_ctrl = &rx_info->rx_ctrl[j]; | |
865 | if (rx_ctrl->ccb) | |
866 | bnad_netif_rx_schedule_poll(bnad, | |
867 | rx_ctrl->ccb); | |
868 | } | |
869 | } | |
8b230ed8 RM |
870 | return IRQ_HANDLED; |
871 | } | |
872 | ||
873 | /* | |
874 | * Called in interrupt / callback context | |
875 | * with bna_lock held, so cfg_flags access is OK | |
876 | */ | |
877 | static void | |
878 | bnad_enable_mbox_irq(struct bnad *bnad) | |
879 | { | |
be7fa326 | 880 | clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); |
e2fa6f2e | 881 | |
8b230ed8 RM |
882 | BNAD_UPDATE_CTR(bnad, mbox_intr_enabled); |
883 | } | |
884 | ||
885 | /* | |
886 | * Called with bnad->bna_lock held b'cos of | |
887 | * bnad->cfg_flags access. | |
888 | */ | |
b7ee31c5 | 889 | static void |
8b230ed8 RM |
890 | bnad_disable_mbox_irq(struct bnad *bnad) |
891 | { | |
be7fa326 | 892 | set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); |
8b230ed8 | 893 | |
be7fa326 RM |
894 | BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); |
895 | } | |
8b230ed8 | 896 | |
be7fa326 RM |
897 | static void |
898 | bnad_set_netdev_perm_addr(struct bnad *bnad) | |
899 | { | |
900 | struct net_device *netdev = bnad->netdev; | |
e2fa6f2e | 901 | |
d6b30598 | 902 | ether_addr_copy(netdev->perm_addr, bnad->perm_addr); |
be7fa326 | 903 | if (is_zero_ether_addr(netdev->dev_addr)) |
d6b30598 | 904 | ether_addr_copy(netdev->dev_addr, bnad->perm_addr); |
8b230ed8 RM |
905 | } |
906 | ||
907 | /* Control Path Handlers */ | |
908 | ||
909 | /* Callbacks */ | |
910 | void | |
078086f3 | 911 | bnad_cb_mbox_intr_enable(struct bnad *bnad) |
8b230ed8 RM |
912 | { |
913 | bnad_enable_mbox_irq(bnad); | |
914 | } | |
915 | ||
916 | void | |
078086f3 | 917 | bnad_cb_mbox_intr_disable(struct bnad *bnad) |
8b230ed8 RM |
918 | { |
919 | bnad_disable_mbox_irq(bnad); | |
920 | } | |
921 | ||
922 | void | |
078086f3 RM |
923 | bnad_cb_ioceth_ready(struct bnad *bnad) |
924 | { | |
925 | bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; | |
926 | complete(&bnad->bnad_completions.ioc_comp); | |
927 | } | |
928 | ||
929 | void | |
930 | bnad_cb_ioceth_failed(struct bnad *bnad) | |
8b230ed8 | 931 | { |
078086f3 | 932 | bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL; |
8b230ed8 | 933 | complete(&bnad->bnad_completions.ioc_comp); |
8b230ed8 RM |
934 | } |
935 | ||
936 | void | |
078086f3 | 937 | bnad_cb_ioceth_disabled(struct bnad *bnad) |
8b230ed8 | 938 | { |
078086f3 | 939 | bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; |
8b230ed8 | 940 | complete(&bnad->bnad_completions.ioc_comp); |
8b230ed8 RM |
941 | } |
942 | ||
943 | static void | |
078086f3 | 944 | bnad_cb_enet_disabled(void *arg) |
8b230ed8 RM |
945 | { |
946 | struct bnad *bnad = (struct bnad *)arg; | |
947 | ||
8b230ed8 | 948 | netif_carrier_off(bnad->netdev); |
078086f3 | 949 | complete(&bnad->bnad_completions.enet_comp); |
8b230ed8 RM |
950 | } |
951 | ||
952 | void | |
078086f3 | 953 | bnad_cb_ethport_link_status(struct bnad *bnad, |
8b230ed8 RM |
954 | enum bna_link_status link_status) |
955 | { | |
3db1cd5c | 956 | bool link_up = false; |
8b230ed8 RM |
957 | |
958 | link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP); | |
959 | ||
960 | if (link_status == BNA_CEE_UP) { | |
078086f3 RM |
961 | if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) |
962 | BNAD_UPDATE_CTR(bnad, cee_toggle); | |
8b230ed8 | 963 | set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); |
078086f3 RM |
964 | } else { |
965 | if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) | |
966 | BNAD_UPDATE_CTR(bnad, cee_toggle); | |
8b230ed8 | 967 | clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); |
078086f3 | 968 | } |
8b230ed8 RM |
969 | |
970 | if (link_up) { | |
971 | if (!netif_carrier_ok(bnad->netdev)) { | |
078086f3 | 972 | uint tx_id, tcb_id; |
ecc46789 | 973 | netdev_info(bnad->netdev, "link up\n"); |
8b230ed8 RM |
974 | netif_carrier_on(bnad->netdev); |
975 | BNAD_UPDATE_CTR(bnad, link_toggle); | |
078086f3 RM |
976 | for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) { |
977 | for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx; | |
978 | tcb_id++) { | |
979 | struct bna_tcb *tcb = | |
980 | bnad->tx_info[tx_id].tcb[tcb_id]; | |
981 | u32 txq_id; | |
982 | if (!tcb) | |
983 | continue; | |
984 | ||
985 | txq_id = tcb->id; | |
986 | ||
987 | if (test_bit(BNAD_TXQ_TX_STARTED, | |
988 | &tcb->flags)) { | |
989 | /* | |
990 | * Force an immediate | |
991 | * Transmit Schedule */ | |
078086f3 RM |
992 | netif_wake_subqueue( |
993 | bnad->netdev, | |
994 | txq_id); | |
995 | BNAD_UPDATE_CTR(bnad, | |
996 | netif_queue_wakeup); | |
997 | } else { | |
998 | netif_stop_subqueue( | |
999 | bnad->netdev, | |
1000 | txq_id); | |
1001 | BNAD_UPDATE_CTR(bnad, | |
1002 | netif_queue_stop); | |
1003 | } | |
1004 | } | |
8b230ed8 RM |
1005 | } |
1006 | } | |
1007 | } else { | |
1008 | if (netif_carrier_ok(bnad->netdev)) { | |
ecc46789 | 1009 | netdev_info(bnad->netdev, "link down\n"); |
8b230ed8 RM |
1010 | netif_carrier_off(bnad->netdev); |
1011 | BNAD_UPDATE_CTR(bnad, link_toggle); | |
1012 | } | |
1013 | } | |
1014 | } | |
1015 | ||
1016 | static void | |
078086f3 | 1017 | bnad_cb_tx_disabled(void *arg, struct bna_tx *tx) |
8b230ed8 RM |
1018 | { |
1019 | struct bnad *bnad = (struct bnad *)arg; | |
1020 | ||
1021 | complete(&bnad->bnad_completions.tx_comp); | |
1022 | } | |
1023 | ||
1024 | static void | |
1025 | bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb) | |
1026 | { | |
1027 | struct bnad_tx_info *tx_info = | |
1028 | (struct bnad_tx_info *)tcb->txq->tx->priv; | |
8b230ed8 | 1029 | |
5216562a | 1030 | tcb->priv = tcb; |
8b230ed8 | 1031 | tx_info->tcb[tcb->id] = tcb; |
8b230ed8 RM |
1032 | } |
1033 | ||
1034 | static void | |
1035 | bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb) | |
1036 | { | |
1037 | struct bnad_tx_info *tx_info = | |
1038 | (struct bnad_tx_info *)tcb->txq->tx->priv; | |
1039 | ||
1040 | tx_info->tcb[tcb->id] = NULL; | |
01b54b14 | 1041 | tcb->priv = NULL; |
8b230ed8 RM |
1042 | } |
1043 | ||
8b230ed8 RM |
1044 | static void |
1045 | bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) | |
1046 | { | |
1047 | struct bnad_rx_info *rx_info = | |
1048 | (struct bnad_rx_info *)ccb->cq->rx->priv; | |
1049 | ||
1050 | rx_info->rx_ctrl[ccb->id].ccb = ccb; | |
1051 | ccb->ctrl = &rx_info->rx_ctrl[ccb->id]; | |
1052 | } | |
1053 | ||
1054 | static void | |
1055 | bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb) | |
1056 | { | |
1057 | struct bnad_rx_info *rx_info = | |
1058 | (struct bnad_rx_info *)ccb->cq->rx->priv; | |
1059 | ||
1060 | rx_info->rx_ctrl[ccb->id].ccb = NULL; | |
1061 | } | |
1062 | ||
1063 | static void | |
078086f3 | 1064 | bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx) |
8b230ed8 RM |
1065 | { |
1066 | struct bnad_tx_info *tx_info = | |
078086f3 RM |
1067 | (struct bnad_tx_info *)tx->priv; |
1068 | struct bna_tcb *tcb; | |
1069 | u32 txq_id; | |
1070 | int i; | |
8b230ed8 | 1071 | |
078086f3 RM |
1072 | for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { |
1073 | tcb = tx_info->tcb[i]; | |
1074 | if (!tcb) | |
1075 | continue; | |
1076 | txq_id = tcb->id; | |
1077 | clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); | |
1078 | netif_stop_subqueue(bnad->netdev, txq_id); | |
078086f3 | 1079 | } |
8b230ed8 RM |
1080 | } |
1081 | ||
1082 | static void | |
078086f3 | 1083 | bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) |
8b230ed8 | 1084 | { |
078086f3 RM |
1085 | struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; |
1086 | struct bna_tcb *tcb; | |
078086f3 RM |
1087 | u32 txq_id; |
1088 | int i; | |
8b230ed8 | 1089 | |
078086f3 RM |
1090 | for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { |
1091 | tcb = tx_info->tcb[i]; | |
1092 | if (!tcb) | |
1093 | continue; | |
1094 | txq_id = tcb->id; | |
8b230ed8 | 1095 | |
01b54b14 | 1096 | BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)); |
078086f3 | 1097 | set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); |
01b54b14 | 1098 | BUG_ON(*(tcb->hw_consumer_index) != 0); |
078086f3 RM |
1099 | |
1100 | if (netif_carrier_ok(bnad->netdev)) { | |
078086f3 RM |
1101 | netif_wake_subqueue(bnad->netdev, txq_id); |
1102 | BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); | |
1103 | } | |
1104 | } | |
be7fa326 RM |
1105 | |
1106 | /* | |
078086f3 | 1107 | * Workaround for first ioceth enable failure & we |
be7fa326 RM |
1108 | * get a 0 MAC address. We try to get the MAC address |
1109 | * again here. | |
1110 | */ | |
d6b30598 IV |
1111 | if (is_zero_ether_addr(bnad->perm_addr)) { |
1112 | bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr); | |
be7fa326 RM |
1113 | bnad_set_netdev_perm_addr(bnad); |
1114 | } | |
be7fa326 RM |
1115 | } |
1116 | ||
01b54b14 JH |
1117 | /* |
1118 | * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm. | |
1119 | */ | |
1120 | static void | |
1121 | bnad_tx_cleanup(struct delayed_work *work) | |
1122 | { | |
1123 | struct bnad_tx_info *tx_info = | |
1124 | container_of(work, struct bnad_tx_info, tx_cleanup_work); | |
1125 | struct bnad *bnad = NULL; | |
01b54b14 JH |
1126 | struct bna_tcb *tcb; |
1127 | unsigned long flags; | |
5216562a | 1128 | u32 i, pending = 0; |
01b54b14 JH |
1129 | |
1130 | for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { | |
1131 | tcb = tx_info->tcb[i]; | |
1132 | if (!tcb) | |
1133 | continue; | |
1134 | ||
1135 | bnad = tcb->bnad; | |
1136 | ||
1137 | if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { | |
1138 | pending++; | |
1139 | continue; | |
1140 | } | |
1141 | ||
b3cc6e88 | 1142 | bnad_txq_cleanup(bnad, tcb); |
01b54b14 | 1143 | |
4e857c58 | 1144 | smp_mb__before_atomic(); |
01b54b14 JH |
1145 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); |
1146 | } | |
1147 | ||
1148 | if (pending) { | |
1149 | queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, | |
1150 | msecs_to_jiffies(1)); | |
1151 | return; | |
1152 | } | |
1153 | ||
1154 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1155 | bna_tx_cleanup_complete(tx_info->tx); | |
1156 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1157 | } | |
1158 | ||
be7fa326 | 1159 | static void |
078086f3 | 1160 | bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) |
be7fa326 | 1161 | { |
078086f3 RM |
1162 | struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; |
1163 | struct bna_tcb *tcb; | |
1164 | int i; | |
1165 | ||
1166 | for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { | |
1167 | tcb = tx_info->tcb[i]; | |
1168 | if (!tcb) | |
1169 | continue; | |
1170 | } | |
1171 | ||
01b54b14 | 1172 | queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0); |
8b230ed8 RM |
1173 | } |
1174 | ||
5bcf6ac0 RM |
1175 | static void |
1176 | bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx) | |
1177 | { | |
1178 | struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; | |
1179 | struct bna_ccb *ccb; | |
1180 | struct bnad_rx_ctrl *rx_ctrl; | |
1181 | int i; | |
1182 | ||
1183 | for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { | |
1184 | rx_ctrl = &rx_info->rx_ctrl[i]; | |
1185 | ccb = rx_ctrl->ccb; | |
1186 | if (!ccb) | |
1187 | continue; | |
1188 | ||
1189 | clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags); | |
1190 | ||
1191 | if (ccb->rcb[1]) | |
1192 | clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags); | |
1193 | } | |
1194 | } | |
1195 | ||
01b54b14 JH |
1196 | /* |
1197 | * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm. | |
1198 | */ | |
1199 | static void | |
1200 | bnad_rx_cleanup(void *work) | |
1201 | { | |
1202 | struct bnad_rx_info *rx_info = | |
1203 | container_of(work, struct bnad_rx_info, rx_cleanup_work); | |
1204 | struct bnad_rx_ctrl *rx_ctrl; | |
1205 | struct bnad *bnad = NULL; | |
1206 | unsigned long flags; | |
5216562a | 1207 | u32 i; |
01b54b14 JH |
1208 | |
1209 | for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { | |
1210 | rx_ctrl = &rx_info->rx_ctrl[i]; | |
1211 | ||
1212 | if (!rx_ctrl->ccb) | |
1213 | continue; | |
1214 | ||
1215 | bnad = rx_ctrl->ccb->bnad; | |
1216 | ||
1217 | /* | |
1218 | * Wait till the poll handler has exited | |
1219 | * and nothing can be scheduled anymore | |
1220 | */ | |
1221 | napi_disable(&rx_ctrl->napi); | |
1222 | ||
b3cc6e88 JH |
1223 | bnad_cq_cleanup(bnad, rx_ctrl->ccb); |
1224 | bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]); | |
01b54b14 | 1225 | if (rx_ctrl->ccb->rcb[1]) |
b3cc6e88 | 1226 | bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]); |
01b54b14 JH |
1227 | } |
1228 | ||
1229 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1230 | bna_rx_cleanup_complete(rx_info->rx); | |
1231 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1232 | } | |
1233 | ||
8b230ed8 | 1234 | static void |
078086f3 | 1235 | bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) |
8b230ed8 | 1236 | { |
078086f3 RM |
1237 | struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; |
1238 | struct bna_ccb *ccb; | |
1239 | struct bnad_rx_ctrl *rx_ctrl; | |
1240 | int i; | |
1241 | ||
772b5235 | 1242 | for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { |
078086f3 RM |
1243 | rx_ctrl = &rx_info->rx_ctrl[i]; |
1244 | ccb = rx_ctrl->ccb; | |
1245 | if (!ccb) | |
1246 | continue; | |
1247 | ||
1248 | clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags); | |
1249 | ||
1250 | if (ccb->rcb[1]) | |
1251 | clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); | |
078086f3 | 1252 | } |
be7fa326 | 1253 | |
01b54b14 | 1254 | queue_work(bnad->work_q, &rx_info->rx_cleanup_work); |
8b230ed8 RM |
1255 | } |
1256 | ||
1257 | static void | |
078086f3 | 1258 | bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx) |
8b230ed8 | 1259 | { |
078086f3 RM |
1260 | struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv; |
1261 | struct bna_ccb *ccb; | |
1262 | struct bna_rcb *rcb; | |
1263 | struct bnad_rx_ctrl *rx_ctrl; | |
30f9fc94 | 1264 | int i, j; |
be7fa326 | 1265 | |
772b5235 | 1266 | for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { |
078086f3 RM |
1267 | rx_ctrl = &rx_info->rx_ctrl[i]; |
1268 | ccb = rx_ctrl->ccb; | |
1269 | if (!ccb) | |
1270 | continue; | |
be7fa326 | 1271 | |
01b54b14 | 1272 | napi_enable(&rx_ctrl->napi); |
8b230ed8 | 1273 | |
078086f3 RM |
1274 | for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) { |
1275 | rcb = ccb->rcb[j]; | |
1276 | if (!rcb) | |
1277 | continue; | |
078086f3 | 1278 | |
30f9fc94 | 1279 | bnad_rxq_alloc_init(bnad, rcb); |
078086f3 | 1280 | set_bit(BNAD_RXQ_STARTED, &rcb->flags); |
5bcf6ac0 | 1281 | set_bit(BNAD_RXQ_POST_OK, &rcb->flags); |
5216562a | 1282 | bnad_rxq_post(bnad, rcb); |
078086f3 | 1283 | } |
8b230ed8 RM |
1284 | } |
1285 | } | |
1286 | ||
1287 | static void | |
078086f3 | 1288 | bnad_cb_rx_disabled(void *arg, struct bna_rx *rx) |
8b230ed8 RM |
1289 | { |
1290 | struct bnad *bnad = (struct bnad *)arg; | |
1291 | ||
1292 | complete(&bnad->bnad_completions.rx_comp); | |
1293 | } | |
1294 | ||
1295 | static void | |
078086f3 | 1296 | bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx) |
8b230ed8 | 1297 | { |
078086f3 | 1298 | bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS; |
8b230ed8 RM |
1299 | complete(&bnad->bnad_completions.mcast_comp); |
1300 | } | |
1301 | ||
1302 | void | |
1303 | bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status, | |
1304 | struct bna_stats *stats) | |
1305 | { | |
1306 | if (status == BNA_CB_SUCCESS) | |
1307 | BNAD_UPDATE_CTR(bnad, hw_stats_updates); | |
1308 | ||
1309 | if (!netif_running(bnad->netdev) || | |
1310 | !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) | |
1311 | return; | |
1312 | ||
1313 | mod_timer(&bnad->stats_timer, | |
1314 | jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); | |
1315 | } | |
1316 | ||
078086f3 RM |
1317 | static void |
1318 | bnad_cb_enet_mtu_set(struct bnad *bnad) | |
1319 | { | |
1320 | bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS; | |
1321 | complete(&bnad->bnad_completions.mtu_comp); | |
1322 | } | |
1323 | ||
72a9730b KG |
1324 | void |
1325 | bnad_cb_completion(void *arg, enum bfa_status status) | |
1326 | { | |
1327 | struct bnad_iocmd_comp *iocmd_comp = | |
1328 | (struct bnad_iocmd_comp *)arg; | |
1329 | ||
1330 | iocmd_comp->comp_status = (u32) status; | |
1331 | complete(&iocmd_comp->comp); | |
1332 | } | |
1333 | ||
8b230ed8 RM |
1334 | /* Resource allocation, free functions */ |
1335 | ||
1336 | static void | |
1337 | bnad_mem_free(struct bnad *bnad, | |
1338 | struct bna_mem_info *mem_info) | |
1339 | { | |
1340 | int i; | |
1341 | dma_addr_t dma_pa; | |
1342 | ||
1343 | if (mem_info->mdl == NULL) | |
1344 | return; | |
1345 | ||
1346 | for (i = 0; i < mem_info->num; i++) { | |
1347 | if (mem_info->mdl[i].kva != NULL) { | |
1348 | if (mem_info->mem_type == BNA_MEM_T_DMA) { | |
1349 | BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma), | |
1350 | dma_pa); | |
5ea74318 IV |
1351 | dma_free_coherent(&bnad->pcidev->dev, |
1352 | mem_info->mdl[i].len, | |
1353 | mem_info->mdl[i].kva, dma_pa); | |
8b230ed8 RM |
1354 | } else |
1355 | kfree(mem_info->mdl[i].kva); | |
1356 | } | |
1357 | } | |
1358 | kfree(mem_info->mdl); | |
1359 | mem_info->mdl = NULL; | |
1360 | } | |
1361 | ||
1362 | static int | |
1363 | bnad_mem_alloc(struct bnad *bnad, | |
1364 | struct bna_mem_info *mem_info) | |
1365 | { | |
1366 | int i; | |
1367 | dma_addr_t dma_pa; | |
1368 | ||
1369 | if ((mem_info->num == 0) || (mem_info->len == 0)) { | |
1370 | mem_info->mdl = NULL; | |
1371 | return 0; | |
1372 | } | |
1373 | ||
1374 | mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr), | |
1375 | GFP_KERNEL); | |
1376 | if (mem_info->mdl == NULL) | |
1377 | return -ENOMEM; | |
1378 | ||
1379 | if (mem_info->mem_type == BNA_MEM_T_DMA) { | |
1380 | for (i = 0; i < mem_info->num; i++) { | |
1381 | mem_info->mdl[i].len = mem_info->len; | |
1382 | mem_info->mdl[i].kva = | |
5ea74318 | 1383 | dma_alloc_coherent(&bnad->pcidev->dev, |
1f9061d2 JP |
1384 | mem_info->len, &dma_pa, |
1385 | GFP_KERNEL); | |
8b230ed8 RM |
1386 | if (mem_info->mdl[i].kva == NULL) |
1387 | goto err_return; | |
1388 | ||
1389 | BNA_SET_DMA_ADDR(dma_pa, | |
1390 | &(mem_info->mdl[i].dma)); | |
1391 | } | |
1392 | } else { | |
1393 | for (i = 0; i < mem_info->num; i++) { | |
1394 | mem_info->mdl[i].len = mem_info->len; | |
1395 | mem_info->mdl[i].kva = kzalloc(mem_info->len, | |
1396 | GFP_KERNEL); | |
1397 | if (mem_info->mdl[i].kva == NULL) | |
1398 | goto err_return; | |
1399 | } | |
1400 | } | |
1401 | ||
1402 | return 0; | |
1403 | ||
1404 | err_return: | |
1405 | bnad_mem_free(bnad, mem_info); | |
1406 | return -ENOMEM; | |
1407 | } | |
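
bnad_mem_alloc()/bnad_mem_free() follow an all-or-nothing pattern: allocate a descriptor table, fill it block by block, and on any failure free whatever was already allocated. A user-space sketch of the same structure (names hypothetical):

```c
#include <stdlib.h>

struct blk {
	void *kva;
	size_t len;
};

static void free_blocks(struct blk *tbl, int num)
{
	int i;

	if (!tbl)
		return;
	for (i = 0; i < num; i++)
		free(tbl[i].kva);	/* entries never reached are still NULL */
	free(tbl);
}

static struct blk *alloc_blocks(int num, size_t len)
{
	struct blk *tbl = calloc(num, sizeof(*tbl));	/* zeroed, so kva == NULL */
	int i;

	if (!tbl)
		return NULL;
	for (i = 0; i < num; i++) {
		tbl[i].len = len;
		tbl[i].kva = calloc(1, len);
		if (!tbl[i].kva) {
			free_blocks(tbl, num);	/* unwind the partial allocation */
			return NULL;
		}
	}
	return tbl;
}
```
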
1408 | ||
1409 | /* Free IRQ for Mailbox */ | |
1410 | static void | |
078086f3 | 1411 | bnad_mbox_irq_free(struct bnad *bnad) |
8b230ed8 RM |
1412 | { |
1413 | int irq; | |
1414 | unsigned long flags; | |
1415 | ||
8b230ed8 | 1416 | spin_lock_irqsave(&bnad->bna_lock, flags); |
8b230ed8 | 1417 | bnad_disable_mbox_irq(bnad); |
e2fa6f2e | 1418 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 RM |
1419 | |
1420 | irq = BNAD_GET_MBOX_IRQ(bnad); | |
be7fa326 | 1421 | free_irq(irq, bnad); |
8b230ed8 RM |
1422 | } |
1423 | ||
1424 | /* | |
1425 | * Allocates IRQ for Mailbox, but keep it disabled | |
1426 | * This will be enabled once we get the mbox enable callback | |
1427 | * from bna | |
1428 | */ | |
1429 | static int | |
078086f3 | 1430 | bnad_mbox_irq_alloc(struct bnad *bnad) |
8b230ed8 | 1431 | { |
0120b99c RM |
1432 | int err = 0; |
1433 | unsigned long irq_flags, flags; | |
8b230ed8 | 1434 | u32 irq; |
0120b99c | 1435 | irq_handler_t irq_handler; |
8b230ed8 | 1436 | |
8b230ed8 RM |
1437 | spin_lock_irqsave(&bnad->bna_lock, flags); |
1438 | if (bnad->cfg_flags & BNAD_CF_MSIX) { | |
1439 | irq_handler = (irq_handler_t)bnad_msix_mbox_handler; | |
8811e267 | 1440 | irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; |
8279171a | 1441 | irq_flags = 0; |
8b230ed8 RM |
1442 | } else { |
1443 | irq_handler = (irq_handler_t)bnad_isr; | |
1444 | irq = bnad->pcidev->irq; | |
5f77898d | 1445 | irq_flags = IRQF_SHARED; |
8b230ed8 | 1446 | } |
8811e267 | 1447 | |
8b230ed8 | 1448 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 RM |
1449 | sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME); |
1450 | ||
e2fa6f2e RM |
1451 | /* |
1452 | * Set the Mbox IRQ disable flag, so that the IRQ handler | |
1453 | * called from request_irq() for SHARED IRQs do not execute | |
1454 | */ | |
1455 | set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); | |
1456 | ||
be7fa326 RM |
1457 | BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); |
1458 | ||
8279171a | 1459 | err = request_irq(irq, irq_handler, irq_flags, |
be7fa326 | 1460 | bnad->mbox_irq_name, bnad); |
e2fa6f2e | 1461 | |
be7fa326 | 1462 | return err; |
8b230ed8 RM |
1463 | } |
1464 | ||
1465 | static void | |
1466 | bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info) | |
1467 | { | |
1468 | kfree(intr_info->idl); | |
1469 | intr_info->idl = NULL; | |
1470 | } | |
1471 | ||
1472 | /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */ | |
1473 | static int | |
1474 | bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src, | |
078086f3 | 1475 | u32 txrx_id, struct bna_intr_info *intr_info) |
8b230ed8 RM |
1476 | { |
1477 | int i, vector_start = 0; | |
1478 | u32 cfg_flags; | |
1479 | unsigned long flags; | |
1480 | ||
1481 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1482 | cfg_flags = bnad->cfg_flags; | |
1483 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1484 | ||
1485 | if (cfg_flags & BNAD_CF_MSIX) { | |
1486 | intr_info->intr_type = BNA_INTR_T_MSIX; | |
1487 | intr_info->idl = kcalloc(intr_info->num, | |
1488 | sizeof(struct bna_intr_descr), | |
1489 | GFP_KERNEL); | |
1490 | if (!intr_info->idl) | |
1491 | return -ENOMEM; | |
1492 | ||
1493 | switch (src) { | |
1494 | case BNAD_INTR_TX: | |
8811e267 | 1495 | vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id; |
8b230ed8 RM |
1496 | break; |
1497 | ||
1498 | case BNAD_INTR_RX: | |
8811e267 RM |
1499 | vector_start = BNAD_MAILBOX_MSIX_VECTORS + |
1500 | (bnad->num_tx * bnad->num_txq_per_tx) + | |
8b230ed8 RM |
1501 | txrx_id; |
1502 | break; | |
1503 | ||
1504 | default: | |
1505 | BUG(); | |
1506 | } | |
1507 | ||
1508 | for (i = 0; i < intr_info->num; i++) | |
1509 | intr_info->idl[i].vector = vector_start + i; | |
1510 | } else { | |
1511 | intr_info->intr_type = BNA_INTR_T_INTX; | |
1512 | intr_info->num = 1; | |
1513 | intr_info->idl = kcalloc(intr_info->num, | |
1514 | sizeof(struct bna_intr_descr), | |
1515 | GFP_KERNEL); | |
1516 | if (!intr_info->idl) | |
1517 | return -ENOMEM; | |
1518 | ||
1519 | switch (src) { | |
1520 | case BNAD_INTR_TX: | |
8811e267 | 1521 | intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK; |
8b230ed8 RM |
1522 | break; |
1523 | ||
1524 | case BNAD_INTR_RX: | |
8811e267 | 1525 | intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK; |
8b230ed8 RM |
1526 | break; |
1527 | } | |
1528 | } | |
1529 | return 0; | |
1530 | } | |
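
bnad_txrx_irq_alloc() lays MSI-X vectors out as: mailbox vector(s) first, then one vector per TxQ across all Tx objects, then one per RxP. A small sketch of that indexing; the counts below are example values, not the driver's constants.

```c
#include <stdio.h>

static int txq_vector(int mbox_vecs, int txq_id)
{
	return mbox_vecs + txq_id;		/* TxQ vectors follow the mailbox */
}

static int rxp_vector(int mbox_vecs, int total_txqs, int rxp_id)
{
	return mbox_vecs + total_txqs + rxp_id;	/* RxP vectors follow all TxQs */
}

int main(void)
{
	int mbox_vecs = 1, num_tx = 1, txq_per_tx = 8;
	int total_txqs = num_tx * txq_per_tx;

	printf("txq 3 -> MSI-X entry %d\n", txq_vector(mbox_vecs, 3));
	printf("rxp 2 -> MSI-X entry %d\n", rxp_vector(mbox_vecs, total_txqs, 2));
	return 0;
}
```
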
1531 | ||
1aa8b471 | 1532 | /* NOTE: Should be called for MSIX only |
8b230ed8 RM |
1533 | * Unregisters Tx MSIX vector(s) from the kernel |
1534 | */ | |
1535 | static void | |
1536 | bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info, | |
1537 | int num_txqs) | |
1538 | { | |
1539 | int i; | |
1540 | int vector_num; | |
1541 | ||
1542 | for (i = 0; i < num_txqs; i++) { | |
1543 | if (tx_info->tcb[i] == NULL) | |
1544 | continue; | |
1545 | ||
1546 | vector_num = tx_info->tcb[i]->intr_vector; | |
1547 | free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]); | |
1548 | } | |
1549 | } | |
1550 | ||
1aa8b471 | 1551 | /* NOTE: Should be called for MSIX only |
8b230ed8 RM |
1552 | * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel |
1553 | */ | |
1554 | static int | |
1555 | bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info, | |
078086f3 | 1556 | u32 tx_id, int num_txqs) |
8b230ed8 RM |
1557 | { |
1558 | int i; | |
1559 | int err; | |
1560 | int vector_num; | |
1561 | ||
1562 | for (i = 0; i < num_txqs; i++) { | |
1563 | vector_num = tx_info->tcb[i]->intr_vector; | |
1564 | sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name, | |
1565 | tx_id + tx_info->tcb[i]->id); | |
1566 | err = request_irq(bnad->msix_table[vector_num].vector, | |
1567 | (irq_handler_t)bnad_msix_tx, 0, | |
1568 | tx_info->tcb[i]->name, | |
1569 | tx_info->tcb[i]); | |
1570 | if (err) | |
1571 | goto err_return; | |
1572 | } | |
1573 | ||
1574 | return 0; | |
1575 | ||
1576 | err_return: | |
1577 | if (i > 0) | |
1578 | bnad_tx_msix_unregister(bnad, tx_info, (i - 1)); | |
1579 | return -1; | |
1580 | } | |
1581 | ||
1aa8b471 | 1582 | /* NOTE: Should be called for MSIX only |
8b230ed8 RM |
1583 | * Unregisters Rx MSIX vector(s) from the kernel |
1584 | */ | |
1585 | static void | |
1586 | bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info, | |
1587 | int num_rxps) | |
1588 | { | |
1589 | int i; | |
1590 | int vector_num; | |
1591 | ||
1592 | for (i = 0; i < num_rxps; i++) { | |
1593 | if (rx_info->rx_ctrl[i].ccb == NULL) | |
1594 | continue; | |
1595 | ||
1596 | vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; | |
1597 | free_irq(bnad->msix_table[vector_num].vector, | |
1598 | rx_info->rx_ctrl[i].ccb); | |
1599 | } | |
1600 | } | |
1601 | ||
1aa8b471 | 1602 | /* NOTE: Should be called for MSIX only |
8b230ed8 RM |
1603 | * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel |
1604 | */ | |
1605 | static int | |
1606 | bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info, | |
078086f3 | 1607 | u32 rx_id, int num_rxps) |
8b230ed8 RM |
1608 | { |
1609 | int i; | |
1610 | int err; | |
1611 | int vector_num; | |
1612 | ||
1613 | for (i = 0; i < num_rxps; i++) { | |
1614 | vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; | |
1615 | sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d", | |
1616 | bnad->netdev->name, | |
1617 | rx_id + rx_info->rx_ctrl[i].ccb->id); | |
1618 | err = request_irq(bnad->msix_table[vector_num].vector, | |
1619 | (irq_handler_t)bnad_msix_rx, 0, | |
1620 | rx_info->rx_ctrl[i].ccb->name, | |
1621 | rx_info->rx_ctrl[i].ccb); | |
1622 | if (err) | |
1623 | goto err_return; | |
1624 | } | |
1625 | ||
1626 | return 0; | |
1627 | ||
1628 | err_return: | |
1629 | /* unwind the i vectors (indices 0 .. i-1) registered so far */ | 
1630 | bnad_rx_msix_unregister(bnad, rx_info, i); | 
1631 | return -1; | |
1632 | } | |
1633 | ||
1634 | /* Free Tx object Resources */ | |
1635 | static void | |
1636 | bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info) | |
1637 | { | |
1638 | int i; | |
1639 | ||
1640 | for (i = 0; i < BNA_TX_RES_T_MAX; i++) { | |
1641 | if (res_info[i].res_type == BNA_RES_T_MEM) | |
1642 | bnad_mem_free(bnad, &res_info[i].res_u.mem_info); | |
1643 | else if (res_info[i].res_type == BNA_RES_T_INTR) | |
1644 | bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); | |
1645 | } | |
1646 | } | |
1647 | ||
1648 | /* Allocates memory and interrupt resources for Tx object */ | |
1649 | static int | |
1650 | bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, | |
078086f3 | 1651 | u32 tx_id) |
8b230ed8 RM |
1652 | { |
1653 | int i, err = 0; | |
1654 | ||
1655 | for (i = 0; i < BNA_TX_RES_T_MAX; i++) { | |
1656 | if (res_info[i].res_type == BNA_RES_T_MEM) | |
1657 | err = bnad_mem_alloc(bnad, | |
1658 | &res_info[i].res_u.mem_info); | |
1659 | else if (res_info[i].res_type == BNA_RES_T_INTR) | |
1660 | err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id, | |
1661 | &res_info[i].res_u.intr_info); | |
1662 | if (err) | |
1663 | goto err_return; | |
1664 | } | |
1665 | return 0; | |
1666 | ||
1667 | err_return: | |
1668 | bnad_tx_res_free(bnad, res_info); | |
1669 | return err; | |
1670 | } | |
1671 | ||
1672 | /* Free Rx object Resources */ | |
1673 | static void | |
1674 | bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info) | |
1675 | { | |
1676 | int i; | |
1677 | ||
1678 | for (i = 0; i < BNA_RX_RES_T_MAX; i++) { | |
1679 | if (res_info[i].res_type == BNA_RES_T_MEM) | |
1680 | bnad_mem_free(bnad, &res_info[i].res_u.mem_info); | |
1681 | else if (res_info[i].res_type == BNA_RES_T_INTR) | |
1682 | bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); | |
1683 | } | |
1684 | } | |
1685 | ||
1686 | /* Allocates memory and interrupt resources for Rx object */ | |
1687 | static int | |
1688 | bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, | |
1689 | uint rx_id) | |
1690 | { | |
1691 | int i, err = 0; | |
1692 | ||
1693 | /* All memory needs to be allocated before setup_ccbs */ | |
1694 | for (i = 0; i < BNA_RX_RES_T_MAX; i++) { | |
1695 | if (res_info[i].res_type == BNA_RES_T_MEM) | |
1696 | err = bnad_mem_alloc(bnad, | |
1697 | &res_info[i].res_u.mem_info); | |
1698 | else if (res_info[i].res_type == BNA_RES_T_INTR) | |
1699 | err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id, | |
1700 | &res_info[i].res_u.intr_info); | |
1701 | if (err) | |
1702 | goto err_return; | |
1703 | } | |
1704 | return 0; | |
1705 | ||
1706 | err_return: | |
1707 | bnad_rx_res_free(bnad, res_info); | |
1708 | return err; | |
1709 | } | |
1710 | ||
1711 | /* Timer callbacks */ | |
1712 | /* a) IOC timer */ | |
1713 | static void | |
1714 | bnad_ioc_timeout(unsigned long data) | |
1715 | { | |
1716 | struct bnad *bnad = (struct bnad *)data; | |
1717 | unsigned long flags; | |
1718 | ||
1719 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
ad24d6f0 | 1720 | bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc); |
8b230ed8 RM |
1721 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
1722 | } | |
1723 | ||
1724 | static void | |
1725 | bnad_ioc_hb_check(unsigned long data) | |
1726 | { | |
1727 | struct bnad *bnad = (struct bnad *)data; | |
1728 | unsigned long flags; | |
1729 | ||
1730 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
ad24d6f0 | 1731 | bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc); |
8b230ed8 RM |
1732 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
1733 | } | |
1734 | ||
1735 | static void | |
1d32f769 | 1736 | bnad_iocpf_timeout(unsigned long data) |
8b230ed8 RM |
1737 | { |
1738 | struct bnad *bnad = (struct bnad *)data; | |
1739 | unsigned long flags; | |
1740 | ||
1741 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
ad24d6f0 | 1742 | bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc); |
1d32f769 RM |
1743 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
1744 | } | |
1745 | ||
1746 | static void | |
1747 | bnad_iocpf_sem_timeout(unsigned long data) | |
1748 | { | |
1749 | struct bnad *bnad = (struct bnad *)data; | |
1750 | unsigned long flags; | |
1751 | ||
1752 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
ad24d6f0 | 1753 | bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc); |
8b230ed8 RM |
1754 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
1755 | } | |
1756 | ||
1757 | /* | |
1758 | * All timer routines use bnad->bna_lock to protect against | |
1759 | * the following race, which may occur in case of no locking: | |
0120b99c | 1760 | *	Time	CPU m		CPU n | 
8b230ed8 RM | 
1761 | *	0	1 = test_bit | 
1762 | *	1			clear_bit | 
1763 | *	2			del_timer_sync | 
1764 | *	3	mod_timer | 
1765 | */ | |
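/*
 * A condensed sketch of how that race is closed, using the DIM timer
 * as the example (the real code is in bnad_dim_timeout() below and in
 * bnad_destroy_rx()):
 *
 *	rearm path (timer callback), under bna_lock:
 *		if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
 *			mod_timer(&bnad->dim_timer, jiffies +
 *				  msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
 *
 *	stop path:
 *		spin_lock_irqsave(&bnad->bna_lock, flags);
 *		to_del = test_and_clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
 *					    &bnad->run_flags);
 *		spin_unlock_irqrestore(&bnad->bna_lock, flags);
 *		if (to_del)
 *			del_timer_sync(&bnad->dim_timer); /* never under bna_lock */
 *
 * Since test_bit and mod_timer run under the same lock as the
 * clear_bit, steps 0 and 3 of the table above cannot straddle steps
 * 1 and 2, and del_timer_sync() is only reached once the flag is
 * clear, so the callback cannot re-arm the timer afterwards.
 */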
1766 | ||
1767 | /* b) Dynamic Interrupt Moderation Timer */ | |
1768 | static void | |
1769 | bnad_dim_timeout(unsigned long data) | |
1770 | { | |
1771 | struct bnad *bnad = (struct bnad *)data; | |
1772 | struct bnad_rx_info *rx_info; | |
1773 | struct bnad_rx_ctrl *rx_ctrl; | |
1774 | int i, j; | |
1775 | unsigned long flags; | |
1776 | ||
1777 | if (!netif_carrier_ok(bnad->netdev)) | |
1778 | return; | |
1779 | ||
1780 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1781 | for (i = 0; i < bnad->num_rx; i++) { | |
1782 | rx_info = &bnad->rx_info[i]; | |
1783 | if (!rx_info->rx) | |
1784 | continue; | |
1785 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | |
1786 | rx_ctrl = &rx_info->rx_ctrl[j]; | |
1787 | if (!rx_ctrl->ccb) | |
1788 | continue; | |
1789 | bna_rx_dim_update(rx_ctrl->ccb); | |
1790 | } | |
1791 | } | |
1792 | ||
1793 | /* Check for BNAD_RF_DIM_TIMER_RUNNING; this does not eliminate the race */ | 
1794 | if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) | |
1795 | mod_timer(&bnad->dim_timer, | |
1796 | jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ)); | |
1797 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1798 | } | |
1799 | ||
1800 | /* c) Statistics Timer */ | |
1801 | static void | |
1802 | bnad_stats_timeout(unsigned long data) | |
1803 | { | |
1804 | struct bnad *bnad = (struct bnad *)data; | |
1805 | unsigned long flags; | |
1806 | ||
1807 | if (!netif_running(bnad->netdev) || | |
1808 | !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) | |
1809 | return; | |
1810 | ||
1811 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 | 1812 | bna_hw_stats_get(&bnad->bna); |
8b230ed8 RM |
1813 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
1814 | } | |
1815 | ||
1816 | /* | |
1817 | * Set up timer for DIM | |
1818 | * Called with bnad->bna_lock held | |
1819 | */ | |
1820 | void | |
1821 | bnad_dim_timer_start(struct bnad *bnad) | |
1822 | { | |
1823 | if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && | |
1824 | !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { | |
1825 | setup_timer(&bnad->dim_timer, bnad_dim_timeout, | |
1826 | (unsigned long)bnad); | |
1827 | set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); | |
1828 | mod_timer(&bnad->dim_timer, | |
1829 | jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ)); | |
1830 | } | |
1831 | } | |
1832 | ||
1833 | /* | |
1834 | * Set up timer for statistics | |
1835 | * Called with mutex_lock(&bnad->conf_mutex) held | |
1836 | */ | |
1837 | static void | |
1838 | bnad_stats_timer_start(struct bnad *bnad) | |
1839 | { | |
1840 | unsigned long flags; | |
1841 | ||
1842 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1843 | if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) { | |
1844 | setup_timer(&bnad->stats_timer, bnad_stats_timeout, | |
1845 | (unsigned long)bnad); | |
1846 | mod_timer(&bnad->stats_timer, | |
1847 | jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); | |
1848 | } | |
1849 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
8b230ed8 RM |
1850 | } |
1851 | ||
1852 | /* | |
1853 | * Stops the stats timer | |
1854 | * Called with mutex_lock(&bnad->conf_mutex) held | |
1855 | */ | |
1856 | static void | |
1857 | bnad_stats_timer_stop(struct bnad *bnad) | |
1858 | { | |
1859 | int to_del = 0; | |
1860 | unsigned long flags; | |
1861 | ||
1862 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1863 | if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) | |
1864 | to_del = 1; | |
1865 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1866 | if (to_del) | |
1867 | del_timer_sync(&bnad->stats_timer); | |
1868 | } | |
1869 | ||
1870 | /* Utilities */ | |
1871 | ||
1872 | static void | |
1873 | bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list) | |
1874 | { | |
1875 | int i = 1; /* Index 0 has broadcast address */ | |
1876 | struct netdev_hw_addr *mc_addr; | |
1877 | ||
1878 | netdev_for_each_mc_addr(mc_addr, netdev) { | |
e2f9ecfc | 1879 | ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]); |
8b230ed8 RM |
1880 | i++; |
1881 | } | |
1882 | } | |
1883 | ||
1884 | static int | |
1885 | bnad_napi_poll_rx(struct napi_struct *napi, int budget) | |
1886 | { | |
1887 | struct bnad_rx_ctrl *rx_ctrl = | |
1888 | container_of(napi, struct bnad_rx_ctrl, napi); | |
2be67144 | 1889 | struct bnad *bnad = rx_ctrl->bnad; |
8b230ed8 RM |
1890 | int rcvd = 0; |
1891 | ||
271e8b79 | 1892 | rx_ctrl->rx_poll_ctr++; |
8b230ed8 RM |
1893 | |
1894 | if (!netif_carrier_ok(bnad->netdev)) | |
1895 | goto poll_exit; | |
1896 | ||
b3cc6e88 | 1897 | rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget); |
271e8b79 | 1898 | if (rcvd >= budget) |
8b230ed8 RM |
1899 | return rcvd; |
1900 | ||
1901 | poll_exit: | |
19dbff9f | 1902 | napi_complete(napi); |
8b230ed8 | 1903 | |
271e8b79 | 1904 | rx_ctrl->rx_complete++; |
2be67144 RM |
1905 | |
1906 | if (rx_ctrl->ccb) | |
271e8b79 RM |
1907 | bnad_enable_rx_irq_unsafe(rx_ctrl->ccb); |
1908 | ||
8b230ed8 RM |
1909 | return rcvd; |
1910 | } | |
1911 | ||
2be67144 | 1912 | #define BNAD_NAPI_POLL_QUOTA 64 |
8b230ed8 | 1913 | static void |
01b54b14 | 1914 | bnad_napi_add(struct bnad *bnad, u32 rx_id) |
8b230ed8 | 1915 | { |
8b230ed8 RM |
1916 | struct bnad_rx_ctrl *rx_ctrl; |
1917 | int i; | |
8b230ed8 RM |
1918 | |
1919 | /* Initialize & enable NAPI */ | |
1920 | for (i = 0; i < bnad->num_rxp_per_rx; i++) { | |
1921 | rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; | |
1922 | netif_napi_add(bnad->netdev, &rx_ctrl->napi, | |
2be67144 RM |
1923 | bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA); |
1924 | } | |
1925 | } | |
1926 | ||
1927 | static void | |
01b54b14 | 1928 | bnad_napi_delete(struct bnad *bnad, u32 rx_id) |
8b230ed8 RM |
1929 | { |
1930 | int i; | |
1931 | ||
1932 | /* First disable and then clean up */ | |
01b54b14 | 1933 | for (i = 0; i < bnad->num_rxp_per_rx; i++) |
8b230ed8 | 1934 | netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi); |
8b230ed8 RM |
1935 | } |
1936 | ||
1937 | /* Should be called with conf_lock held */ | 
1938 | void | |
b3cc6e88 | 1939 | bnad_destroy_tx(struct bnad *bnad, u32 tx_id) |
8b230ed8 RM |
1940 | { |
1941 | struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; | |
1942 | struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; | |
1943 | unsigned long flags; | |
1944 | ||
1945 | if (!tx_info->tx) | |
1946 | return; | |
1947 | ||
1948 | init_completion(&bnad->bnad_completions.tx_comp); | |
1949 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1950 | bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled); | |
1951 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1952 | wait_for_completion(&bnad->bnad_completions.tx_comp); | |
1953 | ||
1954 | if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX) | |
1955 | bnad_tx_msix_unregister(bnad, tx_info, | |
1956 | bnad->num_txq_per_tx); | |
1957 | ||
1958 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1959 | bna_tx_destroy(tx_info->tx); | |
1960 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
1961 | ||
1962 | tx_info->tx = NULL; | |
078086f3 | 1963 | tx_info->tx_id = 0; |
8b230ed8 | 1964 | |
8b230ed8 RM |
1965 | bnad_tx_res_free(bnad, res_info); |
1966 | } | |
1967 | ||
1968 | /* Should be called with conf_lock held */ | 
1969 | int | |
078086f3 | 1970 | bnad_setup_tx(struct bnad *bnad, u32 tx_id) |
8b230ed8 RM |
1971 | { |
1972 | int err; | |
1973 | struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; | |
1974 | struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; | |
1975 | struct bna_intr_info *intr_info = | |
1976 | &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; | |
1977 | struct bna_tx_config *tx_config = &bnad->tx_config[tx_id]; | |
d91d25d5 | 1978 | static const struct bna_tx_event_cbfn tx_cbfn = { |
1979 | .tcb_setup_cbfn = bnad_cb_tcb_setup, | |
1980 | .tcb_destroy_cbfn = bnad_cb_tcb_destroy, | |
1981 | .tx_stall_cbfn = bnad_cb_tx_stall, | |
1982 | .tx_resume_cbfn = bnad_cb_tx_resume, | |
1983 | .tx_cleanup_cbfn = bnad_cb_tx_cleanup, | |
1984 | }; | |
1985 | ||
8b230ed8 RM |
1986 | struct bna_tx *tx; |
1987 | unsigned long flags; | |
1988 | ||
078086f3 RM |
1989 | tx_info->tx_id = tx_id; |
1990 | ||
8b230ed8 RM |
1991 | /* Initialize the Tx object configuration */ |
1992 | tx_config->num_txq = bnad->num_txq_per_tx; | |
1993 | tx_config->txq_depth = bnad->txq_depth; | |
1994 | tx_config->tx_type = BNA_TX_T_REGULAR; | |
078086f3 | 1995 | tx_config->coalescing_timeo = bnad->tx_coalescing_timeo; |
8b230ed8 | 1996 | |
8b230ed8 RM |
1997 | /* Get BNA's resource requirement for one tx object */ |
1998 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
1999 | bna_tx_res_req(bnad->num_txq_per_tx, | |
2000 | bnad->txq_depth, res_info); | |
2001 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2002 | ||
2003 | /* Fill Unmap Q memory requirements */ | |
5216562a RM |
2004 | BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ], |
2005 | bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) * | |
2006 | bnad->txq_depth)); | |
8b230ed8 RM |
2007 | |
2008 | /* Allocate resources */ | |
2009 | err = bnad_tx_res_alloc(bnad, res_info, tx_id); | |
2010 | if (err) | |
2011 | return err; | |
2012 | ||
2013 | /* Ask BNA to create one Tx object, supplying required resources */ | |
2014 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2015 | tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info, | |
2016 | tx_info); | |
2017 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
f29eeb79 RM |
2018 | if (!tx) { |
2019 | err = -ENOMEM; | |
8b230ed8 | 2020 | goto err_return; |
f29eeb79 | 2021 | } |
8b230ed8 RM |
2022 | tx_info->tx = tx; |
2023 | ||
01b54b14 JH |
2024 | INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, |
2025 | (work_func_t)bnad_tx_cleanup); | |
2026 | ||
8b230ed8 RM |
2027 | /* Register ISR for the Tx object */ |
2028 | if (intr_info->intr_type == BNA_INTR_T_MSIX) { | |
2029 | err = bnad_tx_msix_register(bnad, tx_info, | |
2030 | tx_id, bnad->num_txq_per_tx); | |
2031 | if (err) | |
f29eeb79 | 2032 | goto cleanup_tx; |
8b230ed8 RM |
2033 | } |
2034 | ||
2035 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2036 | bna_tx_enable(tx); | |
2037 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2038 | ||
2039 | return 0; | |
2040 | ||
f29eeb79 RM |
2041 | cleanup_tx: |
2042 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2043 | bna_tx_destroy(tx_info->tx); | |
2044 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2045 | tx_info->tx = NULL; | |
2046 | tx_info->tx_id = 0; | |
8b230ed8 RM |
2047 | err_return: |
2048 | bnad_tx_res_free(bnad, res_info); | |
2049 | return err; | |
2050 | } | |
2051 | ||
2052 | /* Setup the rx config for bna_rx_create */ | |
2053 | /* bnad decides the configuration */ | |
2054 | static void | |
2055 | bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) | |
2056 | { | |
e29aa339 | 2057 | memset(rx_config, 0, sizeof(*rx_config)); |
8b230ed8 RM |
2058 | rx_config->rx_type = BNA_RX_T_REGULAR; |
2059 | rx_config->num_paths = bnad->num_rxp_per_rx; | |
078086f3 | 2060 | rx_config->coalescing_timeo = bnad->rx_coalescing_timeo; |
8b230ed8 RM |
2061 | |
2062 | if (bnad->num_rxp_per_rx > 1) { | |
2063 | rx_config->rss_status = BNA_STATUS_T_ENABLED; | |
2064 | rx_config->rss_config.hash_type = | |
078086f3 RM |
2065 | (BFI_ENET_RSS_IPV6 | |
2066 | BFI_ENET_RSS_IPV6_TCP | | |
2067 | BFI_ENET_RSS_IPV4 | | |
2068 | BFI_ENET_RSS_IPV4_TCP); | |
8b230ed8 RM |
2069 | rx_config->rss_config.hash_mask = |
2070 | bnad->num_rxp_per_rx - 1; | |
0fa6aa4a | 2071 | netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key, |
8b230ed8 RM |
2072 | sizeof(rx_config->rss_config.toeplitz_hash_key)); |
2073 | } else { | |
2074 | rx_config->rss_status = BNA_STATUS_T_DISABLED; | |
2075 | memset(&rx_config->rss_config, 0, | |
2076 | sizeof(rx_config->rss_config)); | |
2077 | } | |
e29aa339 RM |
2078 | |
2079 | rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu); | |
2080 | rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED; | |
2081 | ||
2082 | /* BNA_RXP_SINGLE - one data-buffer queue | 
2083 | * BNA_RXP_SLR - one small-buffer and one large-buffer queue | 
2084 | * BNA_RXP_HDS - one header-buffer and one data-buffer queue | 
2085 | */ | |
2086 | /* TODO: configurable param for queue type */ | |
8b230ed8 | 2087 | rx_config->rxp_type = BNA_RXP_SLR; |
8b230ed8 | 2088 | |
e29aa339 RM |
2089 | if (BNAD_PCI_DEV_IS_CAT2(bnad) && |
2090 | rx_config->frame_size > 4096) { | |
2091 | /* Even though size_routing_enable is set in SLR mode, | 
2092 | * small packets may still get routed to the same rxq; | 
2093 | * set buf_size to 2048 instead of PAGE_SIZE. | 
2094 | */ | |
2095 | rx_config->q0_buf_size = 2048; | |
2096 | /* this should be a multiple of 2 */ | 
2097 | rx_config->q0_num_vecs = 4; | |
2098 | rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs; | |
2099 | rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED; | |
2100 | } else { | |
2101 | rx_config->q0_buf_size = rx_config->frame_size; | |
2102 | rx_config->q0_num_vecs = 1; | |
2103 | rx_config->q0_depth = bnad->rxq_depth; | |
2104 | } | |
2105 | ||
2106 | /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */ | |
2107 | if (rx_config->rxp_type == BNA_RXP_SLR) { | |
2108 | rx_config->q1_depth = bnad->rxq_depth; | |
2109 | rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE; | |
2110 | } | |
8b230ed8 | 2111 | |
877767dc IV |
2112 | rx_config->vlan_strip_status = |
2113 | (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ? | |
2114 | BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; | |
8b230ed8 RM |
2115 | } |
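/*
 * Worked example of the q0 sizing above (MTU value illustrative):
 * on a CAT2 device with MTU 9000, frame_size exceeds 4096, so the
 * multi-buffer branch applies. Received frames are scattered across
 * 2 KB buffers (q0_buf_size = 2048) and the queue is deepened to
 * compensate: with rxq_depth = 2048, q0_depth = 2048 * 4 = 8192
 * buffers. With MTU 1500 the else branch posts one buffer per frame:
 * q0_buf_size = frame_size and q0_depth = rxq_depth.
 */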
2116 | ||
2be67144 RM |
2117 | static void |
2118 | bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id) | |
2119 | { | |
2120 | struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; | |
2121 | int i; | |
2122 | ||
2123 | for (i = 0; i < bnad->num_rxp_per_rx; i++) | |
2124 | rx_info->rx_ctrl[i].bnad = bnad; | |
2125 | } | |
2126 | ||
8b230ed8 | 2127 | /* Called with mutex_lock(&bnad->conf_mutex) held */ |
2fd888a5 | 2128 | static u32 |
e29aa339 RM |
2129 | bnad_reinit_rx(struct bnad *bnad) |
2130 | { | |
2131 | struct net_device *netdev = bnad->netdev; | |
2132 | u32 err = 0, current_err = 0; | |
2133 | u32 rx_id = 0, count = 0; | |
2134 | unsigned long flags; | |
2135 | ||
2136 | /* destroy existing rx objects; new ones are created below */ | 
2137 | for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { | |
2138 | if (!bnad->rx_info[rx_id].rx) | |
2139 | continue; | |
2140 | bnad_destroy_rx(bnad, rx_id); | |
2141 | } | |
2142 | ||
2143 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2144 | bna_enet_mtu_set(&bnad->bna.enet, | |
2145 | BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); | |
2146 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2147 | ||
2148 | for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { | |
2149 | count++; | |
2150 | current_err = bnad_setup_rx(bnad, rx_id); | |
2151 | if (current_err && !err) { | |
2152 | err = current_err; | |
ecc46789 | 2153 | netdev_err(netdev, "RXQ:%u setup failed\n", rx_id); |
e29aa339 RM |
2154 | } |
2155 | } | |
2156 | ||
2157 | /* restore rx configuration */ | |
2158 | if (bnad->rx_info[0].rx && !err) { | |
2159 | bnad_restore_vlans(bnad, 0); | |
2160 | bnad_enable_default_bcast(bnad); | |
2161 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2162 | bnad_mac_addr_set_locked(bnad, netdev->dev_addr); | |
2163 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2164 | bnad_set_rx_mode(netdev); | |
2165 | } | |
2166 | ||
2167 | return count; | |
2168 | } | |
2169 | ||
2170 | /* Called with bnad_conf_lock() held */ | |
8b230ed8 | 2171 | void |
b3cc6e88 | 2172 | bnad_destroy_rx(struct bnad *bnad, u32 rx_id) |
8b230ed8 RM |
2173 | { |
2174 | struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; | |
2175 | struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; | |
2176 | struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; | |
2177 | unsigned long flags; | |
271e8b79 | 2178 | int to_del = 0; |
8b230ed8 RM |
2179 | |
2180 | if (!rx_info->rx) | |
2181 | return; | |
2182 | ||
2183 | if (0 == rx_id) { | |
2184 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
271e8b79 RM |
2185 | if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && |
2186 | test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { | |
8b230ed8 | 2187 | clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); |
271e8b79 RM |
2188 | to_del = 1; |
2189 | } | |
8b230ed8 | 2190 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
271e8b79 | 2191 | if (to_del) |
8b230ed8 RM |
2192 | del_timer_sync(&bnad->dim_timer); |
2193 | } | |
2194 | ||
8b230ed8 RM |
2195 | init_completion(&bnad->bnad_completions.rx_comp); |
2196 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2197 | bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled); | |
2198 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2199 | wait_for_completion(&bnad->bnad_completions.rx_comp); | |
2200 | ||
2201 | if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX) | |
2202 | bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); | |
2203 | ||
01b54b14 | 2204 | bnad_napi_delete(bnad, rx_id); |
2be67144 | 2205 | |
8b230ed8 RM |
2206 | spin_lock_irqsave(&bnad->bna_lock, flags); |
2207 | bna_rx_destroy(rx_info->rx); | |
8b230ed8 RM |
2208 | |
2209 | rx_info->rx = NULL; | |
3caa1e95 | 2210 | rx_info->rx_id = 0; |
b9fa1fbf | 2211 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 RM |
2212 | |
2213 | bnad_rx_res_free(bnad, res_info); | |
2214 | } | |
2215 | ||
2216 | /* Called with mutex_lock(&bnad->conf_mutex) held */ | |
2217 | int | |
078086f3 | 2218 | bnad_setup_rx(struct bnad *bnad, u32 rx_id) |
8b230ed8 RM |
2219 | { |
2220 | int err; | |
2221 | struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; | |
2222 | struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; | |
2223 | struct bna_intr_info *intr_info = | |
2224 | &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; | |
2225 | struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; | |
d91d25d5 | 2226 | static const struct bna_rx_event_cbfn rx_cbfn = { |
5216562a | 2227 | .rcb_setup_cbfn = NULL, |
01b54b14 | 2228 | .rcb_destroy_cbfn = NULL, |
d91d25d5 | 2229 | .ccb_setup_cbfn = bnad_cb_ccb_setup, |
2230 | .ccb_destroy_cbfn = bnad_cb_ccb_destroy, | |
5bcf6ac0 | 2231 | .rx_stall_cbfn = bnad_cb_rx_stall, |
d91d25d5 | 2232 | .rx_cleanup_cbfn = bnad_cb_rx_cleanup, |
2233 | .rx_post_cbfn = bnad_cb_rx_post, | |
2234 | }; | |
8b230ed8 RM |
2235 | struct bna_rx *rx; |
2236 | unsigned long flags; | |
2237 | ||
078086f3 RM |
2238 | rx_info->rx_id = rx_id; |
2239 | ||
8b230ed8 RM |
2240 | /* Initialize the Rx object configuration */ |
2241 | bnad_init_rx_config(bnad, rx_config); | |
2242 | ||
8b230ed8 RM |
2243 | /* Get BNA's resource requirement for one Rx object */ |
2244 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2245 | bna_rx_res_req(rx_config, res_info); | |
2246 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2247 | ||
2248 | /* Fill Unmap Q memory requirements */ | |
e29aa339 RM |
2249 | BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ], |
2250 | rx_config->num_paths, | |
2251 | (rx_config->q0_depth * | |
2252 | sizeof(struct bnad_rx_unmap)) + | |
2253 | sizeof(struct bnad_rx_unmap_q)); | |
2254 | ||
2255 | if (rx_config->rxp_type != BNA_RXP_SINGLE) { | |
2256 | BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ], | |
2257 | rx_config->num_paths, | |
2258 | (rx_config->q1_depth * | |
2259 | sizeof(struct bnad_rx_unmap) + | |
2260 | sizeof(struct bnad_rx_unmap_q))); | |
2261 | } | |
8b230ed8 RM |
2262 | /* Allocate resource */ |
2263 | err = bnad_rx_res_alloc(bnad, res_info, rx_id); | |
2264 | if (err) | |
2265 | return err; | |
2266 | ||
2be67144 RM |
2267 | bnad_rx_ctrl_init(bnad, rx_id); |
2268 | ||
8b230ed8 RM |
2269 | /* Ask BNA to create one Rx object, supplying required resources */ |
2270 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2271 | rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info, | |
2272 | rx_info); | |
3caa1e95 RM |
2273 | if (!rx) { |
2274 | err = -ENOMEM; | |
b9fa1fbf | 2275 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 | 2276 | goto err_return; |
3caa1e95 | 2277 | } |
8b230ed8 | 2278 | rx_info->rx = rx; |
b9fa1fbf | 2279 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 | 2280 | |
01b54b14 JH |
2281 | INIT_WORK(&rx_info->rx_cleanup_work, |
2282 | (work_func_t)(bnad_rx_cleanup)); | |
2283 | ||
2be67144 RM |
2284 | /* |
2285 | * Init NAPI: state is set to NAPI_STATE_SCHED, so the IRQ | 
2286 | * handler cannot schedule NAPI at this point. | 
2287 | */ | |
01b54b14 | 2288 | bnad_napi_add(bnad, rx_id); |
2be67144 | 2289 | |
8b230ed8 RM |
2290 | /* Register ISR for the Rx object */ |
2291 | if (intr_info->intr_type == BNA_INTR_T_MSIX) { | |
2292 | err = bnad_rx_msix_register(bnad, rx_info, rx_id, | |
2293 | rx_config->num_paths); | |
2294 | if (err) | |
2295 | goto err_return; | |
2296 | } | |
2297 | ||
8b230ed8 RM |
2298 | spin_lock_irqsave(&bnad->bna_lock, flags); |
2299 | if (0 == rx_id) { | |
2300 | /* Set up Dynamic Interrupt Moderation Vector */ | |
2301 | if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) | |
2302 | bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector); | |
2303 | ||
2304 | /* Enable VLAN filtering only on the default Rx */ | |
2305 | bna_rx_vlanfilter_enable(rx); | |
2306 | ||
2307 | /* Start the DIM timer */ | |
2308 | bnad_dim_timer_start(bnad); | |
2309 | } | |
2310 | ||
2311 | bna_rx_enable(rx); | |
2312 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2313 | ||
2314 | return 0; | |
2315 | ||
2316 | err_return: | |
b3cc6e88 | 2317 | bnad_destroy_rx(bnad, rx_id); |
8b230ed8 RM |
2318 | return err; |
2319 | } | |
2320 | ||
2321 | /* Called with conf_lock & bnad->bna_lock held */ | |
2322 | void | |
2323 | bnad_tx_coalescing_timeo_set(struct bnad *bnad) | |
2324 | { | |
2325 | struct bnad_tx_info *tx_info; | |
2326 | ||
2327 | tx_info = &bnad->tx_info[0]; | |
2328 | if (!tx_info->tx) | |
2329 | return; | |
2330 | ||
2331 | bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo); | |
2332 | } | |
2333 | ||
2334 | /* Called with conf_lock & bnad->bna_lock held */ | |
2335 | void | |
2336 | bnad_rx_coalescing_timeo_set(struct bnad *bnad) | |
2337 | { | |
2338 | struct bnad_rx_info *rx_info; | |
0120b99c | 2339 | int i; |
8b230ed8 RM |
2340 | |
2341 | for (i = 0; i < bnad->num_rx; i++) { | |
2342 | rx_info = &bnad->rx_info[i]; | |
2343 | if (!rx_info->rx) | |
2344 | continue; | |
2345 | bna_rx_coalescing_timeo_set(rx_info->rx, | |
2346 | bnad->rx_coalescing_timeo); | |
2347 | } | |
2348 | } | |
2349 | ||
2350 | /* | |
2351 | * Called with bnad->bna_lock held | |
2352 | */ | |
a2122d95 | 2353 | int |
558caad7 | 2354 | bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr) |
8b230ed8 RM |
2355 | { |
2356 | int ret; | |
2357 | ||
2358 | if (!is_valid_ether_addr(mac_addr)) | |
2359 | return -EADDRNOTAVAIL; | |
2360 | ||
2361 | /* If datapath is down, pretend everything went through */ | |
2362 | if (!bnad->rx_info[0].rx) | |
2363 | return 0; | |
2364 | ||
1f9883e0 | 2365 | ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr); |
8b230ed8 RM |
2366 | if (ret != BNA_CB_SUCCESS) |
2367 | return -EADDRNOTAVAIL; | |
2368 | ||
2369 | return 0; | |
2370 | } | |
2371 | ||
2372 | /* Should be called with conf_lock held */ | |
a2122d95 | 2373 | int |
8b230ed8 RM |
2374 | bnad_enable_default_bcast(struct bnad *bnad) |
2375 | { | |
2376 | struct bnad_rx_info *rx_info = &bnad->rx_info[0]; | |
2377 | int ret; | |
2378 | unsigned long flags; | |
2379 | ||
2380 | init_completion(&bnad->bnad_completions.mcast_comp); | |
2381 | ||
2382 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
558caad7 IV |
2383 | ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr, |
2384 | bnad_cb_rx_mcast_add); | |
8b230ed8 RM |
2385 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2386 | ||
2387 | if (ret == BNA_CB_SUCCESS) | |
2388 | wait_for_completion(&bnad->bnad_completions.mcast_comp); | |
2389 | else | |
2390 | return -ENODEV; | |
2391 | ||
2392 | if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS) | |
2393 | return -ENODEV; | |
2394 | ||
2395 | return 0; | |
2396 | } | |
2397 | ||
19dbff9f | 2398 | /* Called with mutex_lock(&bnad->conf_mutex) held */ |
a2122d95 | 2399 | void |
aad75b66 RM |
2400 | bnad_restore_vlans(struct bnad *bnad, u32 rx_id) |
2401 | { | |
f859d7cb | 2402 | u16 vid; |
aad75b66 RM |
2403 | unsigned long flags; |
2404 | ||
f859d7cb | 2405 | for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) { |
aad75b66 | 2406 | spin_lock_irqsave(&bnad->bna_lock, flags); |
f859d7cb | 2407 | bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid); |
aad75b66 RM |
2408 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2409 | } | |
2410 | } | |
2411 | ||
8b230ed8 RM |
2412 | /* Statistics utilities */ |
2413 | void | |
250e061e | 2414 | bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) |
8b230ed8 | 2415 | { |
8b230ed8 RM |
2416 | int i, j; |
2417 | ||
2418 | for (i = 0; i < bnad->num_rx; i++) { | |
2419 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | |
2420 | if (bnad->rx_info[i].rx_ctrl[j].ccb) { | |
250e061e | 2421 | stats->rx_packets += bnad->rx_info[i]. |
8b230ed8 | 2422 | rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets; |
250e061e | 2423 | stats->rx_bytes += bnad->rx_info[i]. |
8b230ed8 RM |
2424 | rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes; |
2425 | if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && | |
2426 | bnad->rx_info[i].rx_ctrl[j].ccb-> | |
2427 | rcb[1]->rxq) { | |
250e061e | 2428 | stats->rx_packets += |
8b230ed8 RM |
2429 | bnad->rx_info[i].rx_ctrl[j]. |
2430 | ccb->rcb[1]->rxq->rx_packets; | |
250e061e | 2431 | stats->rx_bytes += |
8b230ed8 RM |
2432 | bnad->rx_info[i].rx_ctrl[j]. |
2433 | ccb->rcb[1]->rxq->rx_bytes; | |
2434 | } | |
2435 | } | |
2436 | } | |
2437 | } | |
2438 | for (i = 0; i < bnad->num_tx; i++) { | |
2439 | for (j = 0; j < bnad->num_txq_per_tx; j++) { | |
2440 | if (bnad->tx_info[i].tcb[j]) { | |
250e061e | 2441 | stats->tx_packets += |
8b230ed8 | 2442 | bnad->tx_info[i].tcb[j]->txq->tx_packets; |
250e061e | 2443 | stats->tx_bytes += |
8b230ed8 RM |
2444 | bnad->tx_info[i].tcb[j]->txq->tx_bytes; |
2445 | } | |
2446 | } | |
2447 | } | |
2448 | } | |
2449 | ||
2450 | /* | |
2451 | * Must be called with the bna_lock held. | |
2452 | */ | |
2453 | void | |
250e061e | 2454 | bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) |
8b230ed8 | 2455 | { |
078086f3 RM |
2456 | struct bfi_enet_stats_mac *mac_stats; |
2457 | u32 bmap; | |
8b230ed8 RM |
2458 | int i; |
2459 | ||
078086f3 | 2460 | mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats; |
250e061e | 2461 | stats->rx_errors = |
8b230ed8 RM |
2462 | mac_stats->rx_fcs_error + mac_stats->rx_alignment_error + |
2463 | mac_stats->rx_frame_length_error + mac_stats->rx_code_error + | |
2464 | mac_stats->rx_undersize; | |
250e061e | 2465 | stats->tx_errors = mac_stats->tx_fcs_error + |
8b230ed8 | 2466 | mac_stats->tx_undersize; |
250e061e ED |
2467 | stats->rx_dropped = mac_stats->rx_drop; |
2468 | stats->tx_dropped = mac_stats->tx_drop; | |
2469 | stats->multicast = mac_stats->rx_multicast; | |
2470 | stats->collisions = mac_stats->tx_total_collision; | |
8b230ed8 | 2471 | |
250e061e | 2472 | stats->rx_length_errors = mac_stats->rx_frame_length_error; |
8b230ed8 RM |
2473 | |
2474 | /* receive ring buffer overflow ?? */ | |
2475 | ||
250e061e ED |
2476 | stats->rx_crc_errors = mac_stats->rx_fcs_error; |
2477 | stats->rx_frame_errors = mac_stats->rx_alignment_error; | |
8b230ed8 | 2478 | /* recv'r fifo overrun */ |
078086f3 RM |
2479 | bmap = bna_rx_rid_mask(&bnad->bna); |
2480 | for (i = 0; bmap; i++) { | |
8b230ed8 | 2481 | if (bmap & 1) { |
250e061e | 2482 | stats->rx_fifo_errors += |
8b230ed8 | 2483 | bnad->stats.bna_stats-> |
078086f3 | 2484 | hw_stats.rxf_stats[i].frame_drops; |
8b230ed8 RM |
2485 | break; |
2486 | } | |
2487 | bmap >>= 1; | |
2488 | } | |
2489 | } | |
2490 | ||
2491 | static void | |
2492 | bnad_mbox_irq_sync(struct bnad *bnad) | |
2493 | { | |
2494 | u32 irq; | |
2495 | unsigned long flags; | |
2496 | ||
2497 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2498 | if (bnad->cfg_flags & BNAD_CF_MSIX) | |
8811e267 | 2499 | irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; |
8b230ed8 RM |
2500 | else |
2501 | irq = bnad->pcidev->irq; | |
2502 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2503 | ||
2504 | synchronize_irq(irq); | |
2505 | } | |
2506 | ||
2507 | /* Utility used by bnad_start_xmit, for doing TSO */ | |
2508 | static int | |
2509 | bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) | |
2510 | { | |
2511 | int err; | |
2512 | ||
b13a8a99 | 2513 | err = skb_cow_head(skb, 0); |
2514 | if (err < 0) { | |
2515 | BNAD_UPDATE_CTR(bnad, tso_err); | |
2516 | return err; | |
8b230ed8 RM |
2517 | } |
2518 | ||
2519 | /* | |
2520 | * For TSO, the TCP checksum field is seeded with pseudo-header sum | |
2521 | * excluding the length field. | |
2522 | */ | |
1c53730a | 2523 | if (vlan_get_protocol(skb) == htons(ETH_P_IP)) { |
8b230ed8 RM |
2524 | struct iphdr *iph = ip_hdr(skb); |
2525 | ||
2526 | /* Do we really need these? */ | |
2527 | iph->tot_len = 0; | |
2528 | iph->check = 0; | |
2529 | ||
2530 | tcp_hdr(skb)->check = | |
2531 | ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, | |
2532 | IPPROTO_TCP, 0); | |
2533 | BNAD_UPDATE_CTR(bnad, tso4); | |
2534 | } else { | |
2535 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); | |
2536 | ||
8b230ed8 RM |
2537 | ipv6h->payload_len = 0; |
2538 | tcp_hdr(skb)->check = | |
2539 | ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0, | |
2540 | IPPROTO_TCP, 0); | |
2541 | BNAD_UPDATE_CTR(bnad, tso6); | |
2542 | } | |
2543 | ||
2544 | return 0; | |
2545 | } | |
2546 | ||
2547 | /* | |
2548 | * Initialize Q numbers depending on Rx Paths | |
2549 | * Called with bnad->bna_lock held, because of cfg_flags | |
2550 | * access. | |
2551 | */ | |
2552 | static void | |
2553 | bnad_q_num_init(struct bnad *bnad) | |
2554 | { | |
2555 | int rxps; | |
2556 | ||
2557 | rxps = min((uint)num_online_cpus(), | |
772b5235 | 2558 | (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX)); |
8b230ed8 RM |
2559 | |
2560 | if (!(bnad->cfg_flags & BNAD_CF_MSIX)) | |
2561 | rxps = 1; /* INTx */ | |
2562 | ||
2563 | bnad->num_rx = 1; | |
2564 | bnad->num_tx = 1; | |
2565 | bnad->num_rxp_per_rx = rxps; | |
2566 | bnad->num_txq_per_tx = BNAD_TXQ_NUM; | |
2567 | } | |
2568 | ||
2569 | /* | |
2570 | * Adjusts the Q numbers, given a number of msix vectors | |
2571 | * Give preference to RSS over Tx priority queues; | 
2572 | * in that case, use just 1 Tx queue. | 
2573 | * Called with bnad->bna_lock held because of cfg_flags access | 
2574 | */ | |
2575 | static void | |
078086f3 | 2576 | bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp) |
8b230ed8 RM |
2577 | { |
2578 | bnad->num_txq_per_tx = 1; | |
2579 | if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) + | |
2580 | bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) && | |
2581 | (bnad->cfg_flags & BNAD_CF_MSIX)) { | |
2582 | bnad->num_rxp_per_rx = msix_vectors - | |
2583 | (bnad->num_tx * bnad->num_txq_per_tx) - | |
2584 | BNAD_MAILBOX_MSIX_VECTORS; | |
2585 | } else | |
2586 | bnad->num_rxp_per_rx = 1; | |
2587 | } | |
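/*
 * Illustrative arithmetic for the adjustment above, assuming
 * num_tx == 1 and BNAD_MAILBOX_MSIX_VECTORS == 1: with
 * msix_vectors = 8 the check 8 >= 1 + 2 + 1 passes, so
 * num_rxp_per_rx = 8 - 1 - 1 = 6 RSS paths; with only 3 vectors
 * it fails and the driver falls back to a single Rx path.
 */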
2588 | ||
078086f3 RM |
2589 | /* Enable / disable ioceth */ |
2590 | static int | |
2591 | bnad_ioceth_disable(struct bnad *bnad) | |
8b230ed8 RM |
2592 | { |
2593 | unsigned long flags; | |
078086f3 | 2594 | int err = 0; |
8b230ed8 RM |
2595 | |
2596 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 RM |
2597 | init_completion(&bnad->bnad_completions.ioc_comp); |
2598 | bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP); | |
8b230ed8 RM |
2599 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2600 | ||
078086f3 RM |
2601 | wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, |
2602 | msecs_to_jiffies(BNAD_IOCETH_TIMEOUT)); | |
2603 | ||
2604 | err = bnad->bnad_completions.ioc_comp_status; | |
2605 | return err; | |
8b230ed8 RM |
2606 | } |
2607 | ||
2608 | static int | |
078086f3 | 2609 | bnad_ioceth_enable(struct bnad *bnad) |
8b230ed8 RM |
2610 | { |
2611 | int err = 0; | |
2612 | unsigned long flags; | |
2613 | ||
8b230ed8 | 2614 | spin_lock_irqsave(&bnad->bna_lock, flags); |
078086f3 RM |
2615 | init_completion(&bnad->bnad_completions.ioc_comp); |
2616 | bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING; | |
2617 | bna_ioceth_enable(&bnad->bna.ioceth); | |
8b230ed8 RM |
2618 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2619 | ||
078086f3 RM |
2620 | wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, |
2621 | msecs_to_jiffies(BNAD_IOCETH_TIMEOUT)); | |
8b230ed8 | 2622 | |
078086f3 | 2623 | err = bnad->bnad_completions.ioc_comp_status; |
8b230ed8 RM |
2624 | |
2625 | return err; | |
2626 | } | |
2627 | ||
2628 | /* Free BNA resources */ | |
2629 | static void | |
078086f3 RM |
2630 | bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info, |
2631 | u32 res_val_max) | |
8b230ed8 RM |
2632 | { |
2633 | int i; | |
8b230ed8 | 2634 | |
078086f3 RM |
2635 | for (i = 0; i < res_val_max; i++) |
2636 | bnad_mem_free(bnad, &res_info[i].res_u.mem_info); | |
8b230ed8 RM |
2637 | } |
2638 | ||
2639 | /* Allocates memory and interrupt resources for BNA */ | |
2640 | static int | |
078086f3 RM |
2641 | bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, |
2642 | u32 res_val_max) | |
8b230ed8 RM |
2643 | { |
2644 | int i, err; | |
8b230ed8 | 2645 | |
078086f3 RM |
2646 | for (i = 0; i < res_val_max; i++) { |
2647 | err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info); | |
8b230ed8 RM |
2648 | if (err) |
2649 | goto err_return; | |
2650 | } | |
2651 | return 0; | |
2652 | ||
2653 | err_return: | |
078086f3 | 2654 | bnad_res_free(bnad, res_info, res_val_max); |
8b230ed8 RM |
2655 | return err; |
2656 | } | |
2657 | ||
2658 | /* Interrupt enable / disable */ | |
2659 | static void | |
2660 | bnad_enable_msix(struct bnad *bnad) | |
2661 | { | |
2662 | int i, ret; | |
8b230ed8 RM |
2663 | unsigned long flags; |
2664 | ||
2665 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2666 | if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { | |
2667 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2668 | return; | |
2669 | } | |
2670 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2671 | ||
2672 | if (bnad->msix_table) | |
2673 | return; | |
2674 | ||
8b230ed8 | 2675 | bnad->msix_table = |
b7ee31c5 | 2676 | kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL); |
8b230ed8 RM |
2677 | |
2678 | if (!bnad->msix_table) | |
2679 | goto intx_mode; | |
2680 | ||
b7ee31c5 | 2681 | for (i = 0; i < bnad->msix_num; i++) |
8b230ed8 RM |
2682 | bnad->msix_table[i].entry = i; |
2683 | ||
43c20200 AG |
2684 | ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table, |
2685 | 1, bnad->msix_num); | |
2686 | if (ret < 0) { | |
2687 | goto intx_mode; | |
2688 | } else if (ret < bnad->msix_num) { | |
ecc46789 IV |
2689 | dev_warn(&bnad->pcidev->dev, |
2690 | "%d MSI-X vectors allocated < %d requested\n", | |
2691 | ret, bnad->msix_num); | |
8b230ed8 RM |
2692 | |
2693 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2694 | /* ret = #of vectors that we got */ | |
271e8b79 RM |
2695 | bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2, |
2696 | (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2); | |
8b230ed8 RM |
2697 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2698 | ||
271e8b79 | 2699 | bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + |
8b230ed8 | 2700 | BNAD_MAILBOX_MSIX_VECTORS; |
8b230ed8 | 2701 | |
43c20200 AG |
2702 | if (bnad->msix_num > ret) { |
2703 | pci_disable_msix(bnad->pcidev); | |
8b230ed8 | 2704 | goto intx_mode; |
43c20200 AG |
2705 | } |
2706 | } | |
078086f3 RM |
2707 | |
2708 | pci_intx(bnad->pcidev, 0); | |
2709 | ||
8b230ed8 RM |
2710 | return; |
2711 | ||
2712 | intx_mode: | |
ecc46789 IV |
2713 | dev_warn(&bnad->pcidev->dev, |
2714 | "MSI-X enable failed - operating in INTx mode\n"); | |
8b230ed8 RM |
2715 | |
2716 | kfree(bnad->msix_table); | |
2717 | bnad->msix_table = NULL; | |
2718 | bnad->msix_num = 0; | |
8b230ed8 RM |
2719 | spin_lock_irqsave(&bnad->bna_lock, flags); |
2720 | bnad->cfg_flags &= ~BNAD_CF_MSIX; | |
2721 | bnad_q_num_init(bnad); | |
2722 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2723 | } | |
2724 | ||
2725 | static void | |
2726 | bnad_disable_msix(struct bnad *bnad) | |
2727 | { | |
2728 | u32 cfg_flags; | |
2729 | unsigned long flags; | |
2730 | ||
2731 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2732 | cfg_flags = bnad->cfg_flags; | |
2733 | if (bnad->cfg_flags & BNAD_CF_MSIX) | |
2734 | bnad->cfg_flags &= ~BNAD_CF_MSIX; | |
2735 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2736 | ||
2737 | if (cfg_flags & BNAD_CF_MSIX) { | |
2738 | pci_disable_msix(bnad->pcidev); | |
2739 | kfree(bnad->msix_table); | |
2740 | bnad->msix_table = NULL; | |
2741 | } | |
2742 | } | |
2743 | ||
2744 | /* Netdev entry points */ | |
2745 | static int | |
2746 | bnad_open(struct net_device *netdev) | |
2747 | { | |
2748 | int err; | |
2749 | struct bnad *bnad = netdev_priv(netdev); | |
2750 | struct bna_pause_config pause_config; | |
8b230ed8 RM |
2751 | unsigned long flags; |
2752 | ||
2753 | mutex_lock(&bnad->conf_mutex); | |
2754 | ||
2755 | /* Tx */ | |
2756 | err = bnad_setup_tx(bnad, 0); | |
2757 | if (err) | |
2758 | goto err_return; | |
2759 | ||
2760 | /* Rx */ | |
2761 | err = bnad_setup_rx(bnad, 0); | |
2762 | if (err) | |
2763 | goto cleanup_tx; | |
2764 | ||
2765 | /* Port */ | |
2766 | pause_config.tx_pause = 0; | |
2767 | pause_config.rx_pause = 0; | |
2768 | ||
8b230ed8 | 2769 | spin_lock_irqsave(&bnad->bna_lock, flags); |
e29aa339 RM |
2770 | bna_enet_mtu_set(&bnad->bna.enet, |
2771 | BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); | |
1f9883e0 | 2772 | bna_enet_pause_config(&bnad->bna.enet, &pause_config); |
078086f3 | 2773 | bna_enet_enable(&bnad->bna.enet); |
8b230ed8 RM |
2774 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2775 | ||
2776 | /* Enable broadcast */ | |
2777 | bnad_enable_default_bcast(bnad); | |
2778 | ||
aad75b66 RM |
2779 | /* Restore VLANs, if any */ |
2780 | bnad_restore_vlans(bnad, 0); | |
2781 | ||
8b230ed8 RM |
2782 | /* Set the UCAST address */ |
2783 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
2784 | bnad_mac_addr_set_locked(bnad, netdev->dev_addr); | |
2785 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
2786 | ||
2787 | /* Start the stats timer */ | |
2788 | bnad_stats_timer_start(bnad); | |
2789 | ||
2790 | mutex_unlock(&bnad->conf_mutex); | |
2791 | ||
2792 | return 0; | |
2793 | ||
2794 | cleanup_tx: | |
b3cc6e88 | 2795 | bnad_destroy_tx(bnad, 0); |
8b230ed8 RM |
2796 | |
2797 | err_return: | |
2798 | mutex_unlock(&bnad->conf_mutex); | |
2799 | return err; | |
2800 | } | |
2801 | ||
2802 | static int | |
2803 | bnad_stop(struct net_device *netdev) | |
2804 | { | |
2805 | struct bnad *bnad = netdev_priv(netdev); | |
2806 | unsigned long flags; | |
2807 | ||
2808 | mutex_lock(&bnad->conf_mutex); | |
2809 | ||
2810 | /* Stop the stats timer */ | |
2811 | bnad_stats_timer_stop(bnad); | |
2812 | ||
078086f3 | 2813 | init_completion(&bnad->bnad_completions.enet_comp); |
8b230ed8 RM |
2814 | |
2815 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 RM |
2816 | bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP, |
2817 | bnad_cb_enet_disabled); | |
8b230ed8 RM |
2818 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
2819 | ||
078086f3 | 2820 | wait_for_completion(&bnad->bnad_completions.enet_comp); |
8b230ed8 | 2821 | |
b3cc6e88 JH |
2822 | bnad_destroy_tx(bnad, 0); |
2823 | bnad_destroy_rx(bnad, 0); | |
8b230ed8 RM |
2824 | |
2825 | /* Synchronize mailbox IRQ */ | |
2826 | bnad_mbox_irq_sync(bnad); | |
2827 | ||
2828 | mutex_unlock(&bnad->conf_mutex); | |
2829 | ||
2830 | return 0; | |
2831 | } | |
2832 | ||
2833 | /* TX */ | |
5216562a RM |
2834 | /* Returns 0 for success */ |
2835 | static int | |
2836 | bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, | |
2837 | struct sk_buff *skb, struct bna_txq_entry *txqent) | |
8b230ed8 | 2838 | { |
5216562a RM |
2839 | u16 flags = 0; |
2840 | u32 gso_size; | |
2841 | u16 vlan_tag = 0; | |
8b230ed8 | 2842 | |
df8a39de JP |
2843 | if (skb_vlan_tag_present(skb)) { |
2844 | vlan_tag = (u16)skb_vlan_tag_get(skb); | |
8b230ed8 RM |
2845 | flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); |
2846 | } | |
2847 | if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { | |
5216562a RM |
2848 | vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT) |
2849 | | (vlan_tag & 0x1fff); | |
8b230ed8 RM |
2850 | flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); |
2851 | } | |
8b230ed8 RM |
2852 | txqent->hdr.wi.vlan_tag = htons(vlan_tag); |
2853 | ||
2854 | if (skb_is_gso(skb)) { | |
271e8b79 | 2855 | gso_size = skb_shinfo(skb)->gso_size; |
5216562a | 2856 | if (unlikely(gso_size > bnad->netdev->mtu)) { |
271e8b79 | 2857 | BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long); |
5216562a | 2858 | return -EINVAL; |
271e8b79 RM |
2859 | } |
2860 | if (unlikely((gso_size + skb_transport_offset(skb) + | |
5216562a | 2861 | tcp_hdrlen(skb)) >= skb->len)) { |
b779d0af | 2862 | txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); |
271e8b79 RM |
2863 | txqent->hdr.wi.lso_mss = 0; |
2864 | BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short); | |
2865 | } else { | |
b779d0af | 2866 | txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO); |
271e8b79 RM |
2867 | txqent->hdr.wi.lso_mss = htons(gso_size); |
2868 | } | |
2869 | ||
5216562a | 2870 | if (bnad_tso_prepare(bnad, skb)) { |
271e8b79 | 2871 | BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare); |
5216562a | 2872 | return -EINVAL; |
8b230ed8 | 2873 | } |
5216562a | 2874 | |
8b230ed8 RM |
2875 | flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM); |
2876 | txqent->hdr.wi.l4_hdr_size_n_offset = | |
5216562a RM |
2877 | htons(BNA_TXQ_WI_L4_HDR_N_OFFSET( |
2878 | tcp_hdrlen(skb) >> 2, skb_transport_offset(skb))); | |
2879 | } else { | |
b779d0af | 2880 | txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); |
8b230ed8 RM |
2881 | txqent->hdr.wi.lso_mss = 0; |
2882 | ||
6654cf60 | 2883 | if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) { |
271e8b79 | 2884 | BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long); |
5216562a | 2885 | return -EINVAL; |
8b230ed8 | 2886 | } |
8b230ed8 | 2887 | |
271e8b79 | 2888 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1c53730a | 2889 | __be16 net_proto = vlan_get_protocol(skb); |
271e8b79 | 2890 | u8 proto = 0; |
8b230ed8 | 2891 | |
1c53730a | 2892 | if (net_proto == htons(ETH_P_IP)) |
271e8b79 | 2893 | proto = ip_hdr(skb)->protocol; |
5216562a | 2894 | #ifdef NETIF_F_IPV6_CSUM |
1c53730a | 2895 | else if (net_proto == htons(ETH_P_IPV6)) { |
271e8b79 RM |
2896 | /* nexthdr may not be TCP immediately. */ |
2897 | proto = ipv6_hdr(skb)->nexthdr; | |
2898 | } | |
5216562a | 2899 | #endif |
271e8b79 RM |
2900 | if (proto == IPPROTO_TCP) { |
2901 | flags |= BNA_TXQ_WI_CF_TCP_CKSUM; | |
2902 | txqent->hdr.wi.l4_hdr_size_n_offset = | |
2903 | htons(BNA_TXQ_WI_L4_HDR_N_OFFSET | |
2904 | (0, skb_transport_offset(skb))); | |
2905 | ||
2906 | BNAD_UPDATE_CTR(bnad, tcpcsum_offload); | |
2907 | ||
2908 | if (unlikely(skb_headlen(skb) < | |
5216562a RM |
2909 | skb_transport_offset(skb) + |
2910 | tcp_hdrlen(skb))) { | |
271e8b79 | 2911 | BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr); |
5216562a | 2912 | return -EINVAL; |
271e8b79 | 2913 | } |
271e8b79 RM |
2914 | } else if (proto == IPPROTO_UDP) { |
2915 | flags |= BNA_TXQ_WI_CF_UDP_CKSUM; | |
2916 | txqent->hdr.wi.l4_hdr_size_n_offset = | |
2917 | htons(BNA_TXQ_WI_L4_HDR_N_OFFSET | |
2918 | (0, skb_transport_offset(skb))); | |
2919 | ||
2920 | BNAD_UPDATE_CTR(bnad, udpcsum_offload); | |
2921 | if (unlikely(skb_headlen(skb) < | |
5216562a | 2922 | skb_transport_offset(skb) + |
271e8b79 | 2923 | sizeof(struct udphdr))) { |
271e8b79 | 2924 | BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr); |
5216562a | 2925 | return -EINVAL; |
271e8b79 RM |
2926 | } |
2927 | } else { | |
5216562a | 2928 | |
271e8b79 | 2929 | BNAD_UPDATE_CTR(bnad, tx_skb_csum_err); |
5216562a | 2930 | return -EINVAL; |
8b230ed8 | 2931 | } |
5216562a | 2932 | } else |
271e8b79 | 2933 | txqent->hdr.wi.l4_hdr_size_n_offset = 0; |
8b230ed8 RM |
2934 | } |
2935 | ||
2936 | txqent->hdr.wi.flags = htons(flags); | |
8b230ed8 RM |
2937 | txqent->hdr.wi.frame_length = htonl(skb->len); |
2938 | ||
5216562a RM |
2939 | return 0; |
2940 | } | |
2941 | ||
2942 | /* | |
2943 | * bnad_start_xmit : Netdev entry point for Transmit | |
2944 | * Called under lock held by net_device | |
2945 | */ | |
2946 | static netdev_tx_t | |
2947 | bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |
2948 | { | |
2949 | struct bnad *bnad = netdev_priv(netdev); | |
2950 | u32 txq_id = 0; | |
2951 | struct bna_tcb *tcb = NULL; | |
2952 | struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap; | |
2953 | u32 prod, q_depth, vect_id; | |
2954 | u32 wis, vectors, len; | |
2955 | int i; | |
2956 | dma_addr_t dma_addr; | |
2957 | struct bna_txq_entry *txqent; | |
2958 | ||
271e8b79 | 2959 | len = skb_headlen(skb); |
8b230ed8 | 2960 | |
5216562a RM |
2961 | /* Sanity checks for the skb */ |
2962 | ||
2963 | if (unlikely(skb->len <= ETH_HLEN)) { | |
27400df8 | 2964 | dev_kfree_skb_any(skb); |
5216562a RM |
2965 | BNAD_UPDATE_CTR(bnad, tx_skb_too_short); |
2966 | return NETDEV_TX_OK; | |
2967 | } | |
2968 | if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) { | |
27400df8 | 2969 | dev_kfree_skb_any(skb); |
5216562a RM |
2970 | BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long); | 
2971 | return NETDEV_TX_OK; | |
2972 | } | |
2973 | if (unlikely(len == 0)) { | |
27400df8 | 2974 | dev_kfree_skb_any(skb); |
5216562a RM |
2975 | BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); |
2976 | return NETDEV_TX_OK; | |
2977 | } | |
2978 | ||
2979 | tcb = bnad->tx_info[0].tcb[txq_id]; | |
271e8b79 | 2980 | |
5216562a RM |
2981 | /* |
2982 | * Takes care of the Tx that is scheduled between clearing the flag | |
2983 | * and the netif_tx_stop_all_queues() call. | |
2984 | */ | |
96e31adf | 2985 | if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { |
27400df8 | 2986 | dev_kfree_skb_any(skb); |
5216562a RM |
2987 | BNAD_UPDATE_CTR(bnad, tx_skb_stopping); |
2988 | return NETDEV_TX_OK; | |
2989 | } | |
2990 | ||
96e31adf RM |
2991 | q_depth = tcb->q_depth; |
2992 | prod = tcb->producer_index; | |
2993 | unmap_q = tcb->unmap_q; | |
2994 | ||
5216562a RM |
2995 | vectors = 1 + skb_shinfo(skb)->nr_frags; |
2996 | wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ | |
2997 | ||
2998 | if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) { | |
27400df8 | 2999 | dev_kfree_skb_any(skb); |
5216562a RM |
3000 | BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors); |
3001 | return NETDEV_TX_OK; | |
3002 | } | |
3003 | ||
3004 | /* Check for available TxQ resources */ | |
3005 | if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) { | |
3006 | if ((*tcb->hw_consumer_index != tcb->consumer_index) && | |
3007 | !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { | |
3008 | u32 sent; | |
3009 | sent = bnad_txcmpl_process(bnad, tcb); | |
3010 | if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) | |
3011 | bna_ib_ack(tcb->i_dbell, sent); | |
4e857c58 | 3012 | smp_mb__before_atomic(); |
5216562a RM |
3013 | clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); |
3014 | } else { | |
3015 | netif_stop_queue(netdev); | |
3016 | BNAD_UPDATE_CTR(bnad, netif_queue_stop); | |
3017 | } | |
3018 | ||
3019 | smp_mb(); | |
3020 | /* | |
3021 | * Check again to deal with the race between | 
3022 | * netif_stop_queue() here and netif_wake_queue() in the | 
3023 | * interrupt handler, which does not run under the netif tx lock. | 
3024 | */ | |
3025 | if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) { | |
3026 | BNAD_UPDATE_CTR(bnad, netif_queue_stop); | |
3027 | return NETDEV_TX_BUSY; | |
3028 | } else { | |
3029 | netif_wake_queue(netdev); | |
3030 | BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); | |
3031 | } | |
3032 | } | |
3033 | ||
3034 | txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; | |
3035 | head_unmap = &unmap_q[prod]; | |
3036 | ||
3037 | /* Program the opcode, flags, frame_len, num_vectors in WI */ | |
3038 | if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) { | |
27400df8 | 3039 | dev_kfree_skb_any(skb); |
5216562a RM |
3040 | return NETDEV_TX_OK; |
3041 | } | |
3042 | txqent->hdr.wi.reserved = 0; | |
3043 | txqent->hdr.wi.num_vectors = vectors; | |
3044 | ||
3045 | head_unmap->skb = skb; | |
3046 | head_unmap->nvecs = 0; | |
3047 | ||
3048 | /* Program the vectors */ | |
3049 | unmap = head_unmap; | |
3050 | dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, | |
3051 | len, DMA_TO_DEVICE); | |
ba5ca784 IV |
3052 | if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { |
3053 | dev_kfree_skb_any(skb); | |
3054 | BNAD_UPDATE_CTR(bnad, tx_skb_map_failed); | |
3055 | return NETDEV_TX_OK; | |
3056 | } | |
5216562a RM |
3057 | BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); |
3058 | txqent->vector[0].length = htons(len); | |
3059 | dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); | |
3060 | head_unmap->nvecs++; | |
3061 | ||
3062 | for (i = 0, vect_id = 0; i < vectors - 1; i++) { | |
9e903e08 | 3063 | const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; |
24f5d33d | 3064 | u32 size = skb_frag_size(frag); |
8b230ed8 | 3065 | |
271e8b79 | 3066 | if (unlikely(size == 0)) { |
5216562a RM |
3067 | /* Undo the changes starting at tcb->producer_index */ |
3068 | bnad_tx_buff_unmap(bnad, unmap_q, q_depth, | |
3069 | tcb->producer_index); | |
27400df8 | 3070 | dev_kfree_skb_any(skb); |
271e8b79 RM |
3071 | BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero); |
3072 | return NETDEV_TX_OK; | |
3073 | } | |
3074 | ||
3075 | len += size; | |
3076 | ||
5216562a RM |
3077 | vect_id++; |
3078 | if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) { | |
8b230ed8 | 3079 | vect_id = 0; |
5216562a RM |
3080 | BNA_QE_INDX_INC(prod, q_depth); |
3081 | txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; | |
b779d0af | 3082 | txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION); |
5216562a | 3083 | unmap = &unmap_q[prod]; |
8b230ed8 RM |
3084 | } |
3085 | ||
4d5b1a67 IC |
3086 | dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, |
3087 | 0, size, DMA_TO_DEVICE); | |
ba5ca784 IV |
3088 | if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { |
3089 | /* Undo the changes starting at tcb->producer_index */ | |
3090 | bnad_tx_buff_unmap(bnad, unmap_q, q_depth, | |
3091 | tcb->producer_index); | |
3092 | dev_kfree_skb_any(skb); | |
3093 | BNAD_UPDATE_CTR(bnad, tx_skb_map_failed); | |
3094 | return NETDEV_TX_OK; | |
3095 | } | |
3096 | ||
ecca6a96 | 3097 | dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size); |
8b230ed8 | 3098 | BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); |
5216562a RM |
3099 | txqent->vector[vect_id].length = htons(size); |
3100 | dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr, | |
ecca6a96 | 3101 | dma_addr); |
5216562a | 3102 | head_unmap->nvecs++; |
8b230ed8 RM |
3103 | } |
3104 | ||
271e8b79 | 3105 | if (unlikely(len != skb->len)) { |
5216562a RM |
3106 | /* Undo the changes starting at tcb->producer_index */ |
3107 | bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index); | |
27400df8 | 3108 | dev_kfree_skb_any(skb); |
271e8b79 RM |
3109 | BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch); |
3110 | return NETDEV_TX_OK; | |
3111 | } | |
3112 | ||
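 | /* Commit the new producer index. The barrier orders the index update | |
 | * against the BNAD_TXQ_TX_STARTED test below; if the queue was stopped | |
 | * in the meantime the doorbell is skipped, and the stop path is | |
 | * presumably responsible for flushing the queue. | |
 | */ | |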
5216562a RM |
3113 | BNA_QE_INDX_INC(prod, q_depth); |
3114 | tcb->producer_index = prod; | |
8b230ed8 RM |
3115 | |
3116 | smp_mb(); | |
be7fa326 RM |
3117 | |
3118 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) | |
3119 | return NETDEV_TX_OK; | |
3120 | ||
fee1253e RM |
3121 | skb_tx_timestamp(skb); |
3122 | ||
8b230ed8 | 3123 | bna_txq_prod_indx_doorbell(tcb); |
271e8b79 | 3124 | smp_mb(); |
8b230ed8 | 3125 | |
8b230ed8 RM |
3126 | return NETDEV_TX_OK; |
3127 | } | |
3128 | ||
3129 | /* | |
3130 | * A spin lock is used to synchronize reads of the stats structures, | |
3131 | * which are written by BNA under the same lock. | |
3132 | */ | |
250e061e ED |
3133 | static struct rtnl_link_stats64 * |
3134 | bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) | |
8b230ed8 RM |
3135 | { |
3136 | struct bnad *bnad = netdev_priv(netdev); | |
3137 | unsigned long flags; | |
3138 | ||
3139 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3140 | ||
250e061e ED |
3141 | bnad_netdev_qstats_fill(bnad, stats); |
3142 | bnad_netdev_hwstats_fill(bnad, stats); | |
8b230ed8 RM |
3143 | |
3144 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3145 | ||
250e061e | 3146 | return stats; |
8b230ed8 RM |
3147 | } |
3148 | ||
fe1624cf RM |
3149 | static void |
3150 | bnad_set_rx_ucast_fltr(struct bnad *bnad) | |
3151 | { | |
3152 | struct net_device *netdev = bnad->netdev; | |
3153 | int uc_count = netdev_uc_count(netdev); | |
3154 | enum bna_cb_status ret; | |
3155 | u8 *mac_list; | |
3156 | struct netdev_hw_addr *ha; | |
3157 | int entry; | |
3158 | ||
3159 | if (netdev_uc_empty(bnad->netdev)) { | |
1f9883e0 | 3160 | bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL); |
fe1624cf RM |
3161 | return; |
3162 | } | |
3163 | ||
3164 | if (uc_count > bna_attr(&bnad->bna)->num_ucmac) | |
3165 | goto mode_default; | |
3166 | ||
3167 | mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC); | |
3168 | if (mac_list == NULL) | |
3169 | goto mode_default; | |
3170 | ||
3171 | entry = 0; | |
3172 | netdev_for_each_uc_addr(ha, netdev) { | |
e2f9ecfc | 3173 | ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]); |
fe1624cf RM |
3174 | entry++; |
3175 | } | |
3176 | ||
1f9883e0 | 3177 | ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list); |
fe1624cf RM |
3178 | kfree(mac_list); |
3179 | ||
3180 | if (ret != BNA_CB_SUCCESS) | |
3181 | goto mode_default; | |
3182 | ||
3183 | return; | |
3184 | ||
3185 | /* ucast packets not in the UCAM are routed to the default function */ | |
3186 | mode_default: | |
3187 | bnad->cfg_flags |= BNAD_CF_DEFAULT; | |
1f9883e0 | 3188 | bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL); |
fe1624cf RM |
3189 | } |
3190 | ||
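 | /* Multicast filtering: the broadcast address occupies slot 0, hence | |
 | * mc_count + 1 entries are programmed; CAM overflow or any failure | |
 | * falls back to ALLMULTI mode. | |
 | */ | |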
3191 | static void | |
3192 | bnad_set_rx_mcast_fltr(struct bnad *bnad) | |
3193 | { | |
3194 | struct net_device *netdev = bnad->netdev; | |
3195 | int mc_count = netdev_mc_count(netdev); | |
3196 | enum bna_cb_status ret; | |
3197 | u8 *mac_list; | |
3198 | ||
3199 | if (netdev->flags & IFF_ALLMULTI) | |
3200 | goto mode_allmulti; | |
3201 | ||
3202 | if (netdev_mc_empty(netdev)) | |
3203 | return; | |
3204 | ||
3205 | if (mc_count > bna_attr(&bnad->bna)->num_mcmac) | |
3206 | goto mode_allmulti; | |
3207 | ||
3208 | mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC); | |
3209 | ||
3210 | if (mac_list == NULL) | |
3211 | goto mode_allmulti; | |
3212 | ||
e2f9ecfc | 3213 | ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]); |
fe1624cf RM |
3214 | |
3215 | /* copy rest of the MCAST addresses */ | |
3216 | bnad_netdev_mc_list_get(netdev, mac_list); | |
1f9883e0 | 3217 | ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list); |
fe1624cf RM |
3218 | kfree(mac_list); |
3219 | ||
3220 | if (ret != BNA_CB_SUCCESS) | |
3221 | goto mode_allmulti; | |
3222 | ||
3223 | return; | |
3224 | ||
3225 | mode_allmulti: | |
3226 | bnad->cfg_flags |= BNAD_CF_ALLMULTI; | |
1f9883e0 | 3227 | bna_rx_mcast_delall(bnad->rx_info[0].rx); |
fe1624cf RM |
3228 | } |
3229 | ||
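 | /* Rx mode precedence: IFF_PROMISC overrides everything; otherwise the | |
 | * multicast and unicast filters are programmed, with CAM overflow | |
 | * falling back to ALLMULTI and default-function mode respectively. | |
 | */ | |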
a2122d95 | 3230 | void |
8b230ed8 RM |
3231 | bnad_set_rx_mode(struct net_device *netdev) |
3232 | { | |
3233 | struct bnad *bnad = netdev_priv(netdev); | |
fe1624cf | 3234 | enum bna_rxmode new_mode, mode_mask; |
8b230ed8 RM |
3235 | unsigned long flags; |
3236 | ||
3237 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3238 | ||
fe1624cf RM |
3239 | if (bnad->rx_info[0].rx == NULL) { |
3240 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3241 | return; | |
8b230ed8 RM |
3242 | } |
3243 | ||
fe1624cf RM |
3244 | /* clear the bnad mode flags before updating them with the new settings */ | |
3245 | bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT | | |
3246 | BNAD_CF_ALLMULTI); | |
271e8b79 | 3247 | |
fe1624cf RM |
3248 | new_mode = 0; |
3249 | if (netdev->flags & IFF_PROMISC) { | |
3250 | new_mode |= BNAD_RXMODE_PROMISC_DEFAULT; | |
3251 | bnad->cfg_flags |= BNAD_CF_PROMISC; | |
3252 | } else { | |
3253 | bnad_set_rx_mcast_fltr(bnad); | |
8b230ed8 | 3254 | |
fe1624cf RM |
3255 | if (bnad->cfg_flags & BNAD_CF_ALLMULTI) |
3256 | new_mode |= BNA_RXMODE_ALLMULTI; | |
8b230ed8 | 3257 | |
fe1624cf | 3258 | bnad_set_rx_ucast_fltr(bnad); |
8b230ed8 | 3259 | |
fe1624cf RM |
3260 | if (bnad->cfg_flags & BNAD_CF_DEFAULT) |
3261 | new_mode |= BNA_RXMODE_DEFAULT; | |
3262 | } | |
8b230ed8 | 3263 | |
fe1624cf RM |
3264 | mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT | |
3265 | BNA_RXMODE_ALLMULTI; | |
1f9883e0 | 3266 | bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask); |
8b230ed8 | 3267 | |
8b230ed8 RM |
3268 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
3269 | } | |
3270 | ||
3271 | /* | |
3272 | * bna_lock is used to synchronize writes to netdev->addr; | |
3273 | * conf_lock cannot be used since this call may be made | |
3274 | * in a non-blocking context. | |
3275 | */ | |
3276 | static int | |
e2f9ecfc | 3277 | bnad_set_mac_address(struct net_device *netdev, void *addr) |
8b230ed8 RM |
3278 | { |
3279 | int err; | |
3280 | struct bnad *bnad = netdev_priv(netdev); | |
e2f9ecfc | 3281 | struct sockaddr *sa = (struct sockaddr *)addr; |
8b230ed8 RM |
3282 | unsigned long flags; |
3283 | ||
3284 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3285 | ||
3286 | err = bnad_mac_addr_set_locked(bnad, sa->sa_data); | |
8b230ed8 | 3287 | if (!err) |
e2f9ecfc | 3288 | ether_addr_copy(netdev->dev_addr, sa->sa_data); |
8b230ed8 RM |
3289 | |
3290 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3291 | ||
3292 | return err; | |
3293 | } | |
3294 | ||
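 | /* bna_enet_mtu_set() completes asynchronously via bnad_cb_enet_mtu_set; | |
 | * the completion below turns it into a synchronous call for the caller. | |
 | */ | |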
3295 | static int | |
e29aa339 | 3296 | bnad_mtu_set(struct bnad *bnad, int frame_size) |
8b230ed8 | 3297 | { |
8b230ed8 RM |
3298 | unsigned long flags; |
3299 | ||
078086f3 RM |
3300 | init_completion(&bnad->bnad_completions.mtu_comp); |
3301 | ||
3302 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
e29aa339 | 3303 | bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set); |
078086f3 RM |
3304 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
3305 | ||
3306 | wait_for_completion(&bnad->bnad_completions.mtu_comp); | |
3307 | ||
3308 | return bnad->bnad_completions.mtu_comp_status; | |
3309 | } | |
3310 | ||
3311 | static int | |
3312 | bnad_change_mtu(struct net_device *netdev, int new_mtu) | |
3313 | { | |
e29aa339 | 3314 | int err, mtu; |
8b230ed8 | 3315 | struct bnad *bnad = netdev_priv(netdev); |
e29aa339 | 3316 | u32 rx_count = 0, frame, new_frame; |
8b230ed8 RM |
3317 | |
3318 | if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU) | |
3319 | return -EINVAL; | |
3320 | ||
3321 | mutex_lock(&bnad->conf_mutex); | |
3322 | ||
e29aa339 | 3323 | mtu = netdev->mtu; |
8b230ed8 RM |
3324 | netdev->mtu = new_mtu; |
3325 | ||
e29aa339 RM |
3326 | frame = BNAD_FRAME_SIZE(mtu); |
3327 | new_frame = BNAD_FRAME_SIZE(new_mtu); | |
3328 | ||
3329 | /* check if multi-buffer needs to be enabled */ | |
3330 | if (BNAD_PCI_DEV_IS_CAT2(bnad) && | |
3331 | netif_running(bnad->netdev)) { | |
3332 | /* reinit only when the frame size crosses the 4K boundary */ | |
3333 | if ((frame <= 4096 && new_frame > 4096) || | |
3334 | (frame > 4096 && new_frame <= 4096)) | |
3335 | rx_count = bnad_reinit_rx(bnad); | |
3336 | } | |
3337 | ||
3338 | /* rx_count > 0 means the Rx path was re-created above; in either | |
3339 | * case the new frame size must still be programmed into the hardware | |
3340 | */ | |
3341 | err = bnad_mtu_set(bnad, new_frame); | |
078086f3 RM |
3342 | if (err) |
3343 | err = -EBUSY; | |
8b230ed8 RM |
3344 | |
3345 | mutex_unlock(&bnad->conf_mutex); | |
3346 | return err; | |
3347 | } | |
3348 | ||
8e586137 | 3349 | static int |
80d5c368 | 3350 | bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) |
8b230ed8 RM |
3351 | { |
3352 | struct bnad *bnad = netdev_priv(netdev); | |
3353 | unsigned long flags; | |
3354 | ||
3355 | if (!bnad->rx_info[0].rx) | |
8e586137 | 3356 | return 0; |
8b230ed8 RM |
3357 | |
3358 | mutex_lock(&bnad->conf_mutex); | |
3359 | ||
3360 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3361 | bna_rx_vlan_add(bnad->rx_info[0].rx, vid); | |
f859d7cb | 3362 | set_bit(vid, bnad->active_vlans); |
8b230ed8 RM |
3363 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
3364 | ||
3365 | mutex_unlock(&bnad->conf_mutex); | |
8e586137 JP |
3366 | |
3367 | return 0; | |
8b230ed8 RM |
3368 | } |
3369 | ||
8e586137 | 3370 | static int |
80d5c368 | 3371 | bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) |
8b230ed8 RM |
3372 | { |
3373 | struct bnad *bnad = netdev_priv(netdev); | |
3374 | unsigned long flags; | |
3375 | ||
3376 | if (!bnad->rx_info[0].rx) | |
8e586137 | 3377 | return 0; |
8b230ed8 RM |
3378 | |
3379 | mutex_lock(&bnad->conf_mutex); | |
3380 | ||
3381 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
f859d7cb | 3382 | clear_bit(vid, bnad->active_vlans); |
8b230ed8 RM |
3383 | bna_rx_vlan_del(bnad->rx_info[0].rx, vid); |
3384 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3385 | ||
3386 | mutex_unlock(&bnad->conf_mutex); | |
8e586137 JP |
3387 | |
3388 | return 0; | |
8b230ed8 RM |
3389 | } |
3390 | ||
877767dc IV |
3391 | static int bnad_set_features(struct net_device *dev, netdev_features_t features) |
3392 | { | |
3393 | struct bnad *bnad = netdev_priv(dev); | |
3394 | netdev_features_t changed = features ^ dev->features; | |
3395 | ||
3396 | if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) { | |
3397 | unsigned long flags; | |
3398 | ||
3399 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3400 | ||
3401 | if (features & NETIF_F_HW_VLAN_CTAG_RX) | |
3402 | bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); | |
3403 | else | |
3404 | bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); | |
3405 | ||
3406 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3407 | } | |
3408 | ||
3409 | return 0; | |
3410 | } | |
3411 | ||
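 | /* Netpoll entry point: with INTx the ISR is invoked directly with the | |
 | * interrupt masked; with MSI-X only the Rx completion queues need to be | |
 | * polled, since Tx completions are also processed from the send path. | |
 | */ | |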
8b230ed8 RM |
3412 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3413 | static void | |
3414 | bnad_netpoll(struct net_device *netdev) | |
3415 | { | |
3416 | struct bnad *bnad = netdev_priv(netdev); | |
3417 | struct bnad_rx_info *rx_info; | |
3418 | struct bnad_rx_ctrl *rx_ctrl; | |
3419 | u32 curr_mask; | |
3420 | int i, j; | |
3421 | ||
3422 | if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { | |
3423 | bna_intx_disable(&bnad->bna, curr_mask); | |
3424 | bnad_isr(bnad->pcidev->irq, netdev); | |
3425 | bna_intx_enable(&bnad->bna, curr_mask); | |
3426 | } else { | |
19dbff9f RM |
3427 | /* |
3428 | * Tx processing may happen in sending context, so no need | |
3429 | * to explicitly process completions here | |
3430 | */ | |
3431 | ||
3432 | /* Rx processing */ | |
8b230ed8 RM |
3433 | for (i = 0; i < bnad->num_rx; i++) { |
3434 | rx_info = &bnad->rx_info[i]; | |
3435 | if (!rx_info->rx) | |
3436 | continue; | |
3437 | for (j = 0; j < bnad->num_rxp_per_rx; j++) { | |
3438 | rx_ctrl = &rx_info->rx_ctrl[j]; | |
271e8b79 | 3439 | if (rx_ctrl->ccb) |
8b230ed8 RM |
3440 | bnad_netif_rx_schedule_poll(bnad, |
3441 | rx_ctrl->ccb); | |
8b230ed8 RM |
3442 | } |
3443 | } | |
3444 | } | |
3445 | } | |
3446 | #endif | |
3447 | ||
3448 | static const struct net_device_ops bnad_netdev_ops = { | |
3449 | .ndo_open = bnad_open, | |
3450 | .ndo_stop = bnad_stop, | |
3451 | .ndo_start_xmit = bnad_start_xmit, | |
250e061e | 3452 | .ndo_get_stats64 = bnad_get_stats64, |
8b230ed8 | 3453 | .ndo_set_rx_mode = bnad_set_rx_mode, |
8b230ed8 RM |
3454 | .ndo_validate_addr = eth_validate_addr, |
3455 | .ndo_set_mac_address = bnad_set_mac_address, | |
3456 | .ndo_change_mtu = bnad_change_mtu, | |
8b230ed8 RM |
3457 | .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, |
3458 | .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid, | |
877767dc | 3459 | .ndo_set_features = bnad_set_features, |
8b230ed8 RM |
3460 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3461 | .ndo_poll_controller = bnad_netpoll | |
3462 | #endif | |
3463 | }; | |
3464 | ||
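 | /* hw_features are the offloads the user may toggle via ethtool, while | |
 | * vlan_features are propagated to stacked VLAN devices. VLAN filtering | |
 | * is set only in netdev->features, so it is always on and cannot be | |
 | * switched off from user space. | |
 | */ | |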
3465 | static void | |
3466 | bnad_netdev_init(struct bnad *bnad, bool using_dac) | |
3467 | { | |
3468 | struct net_device *netdev = bnad->netdev; | |
3469 | ||
e5ee20e7 MM |
3470 | netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | |
3471 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
877767dc IV |
3472 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX | |
3473 | NETIF_F_HW_VLAN_CTAG_RX; | |
8b230ed8 | 3474 | |
e5ee20e7 MM |
3475 | netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | |
3476 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
3477 | NETIF_F_TSO | NETIF_F_TSO6; | |
8b230ed8 | 3478 | |
877767dc | 3479 | netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; |
8b230ed8 RM |
3480 | |
3481 | if (using_dac) | |
3482 | netdev->features |= NETIF_F_HIGHDMA; | |
3483 | ||
8b230ed8 RM |
3484 | netdev->mem_start = bnad->mmio_start; |
3485 | netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1; | |
3486 | ||
3487 | netdev->netdev_ops = &bnad_netdev_ops; | |
3488 | bnad_set_ethtool_ops(netdev); | |
3489 | } | |
3490 | ||
3491 | /* | |
3492 | * 1. Initialize the bnad structure | |
3493 | * 2. Setup netdev pointer in pci_dev | |
d95d1081 JH |
3494 | * 3. Initialize no. of TxQ & CQs & MSIX vectors |
3495 | * 4. Initialize work queue. | |
8b230ed8 RM |
3496 | */ |
3497 | static int | |
3498 | bnad_init(struct bnad *bnad, | |
3499 | struct pci_dev *pdev, struct net_device *netdev) | |
3500 | { | |
3501 | unsigned long flags; | |
3502 | ||
3503 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
3504 | pci_set_drvdata(pdev, netdev); | |
3505 | ||
3506 | bnad->netdev = netdev; | |
3507 | bnad->pcidev = pdev; | |
3508 | bnad->mmio_start = pci_resource_start(pdev, 0); | |
3509 | bnad->mmio_len = pci_resource_len(pdev, 0); | |
3510 | bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len); | |
3511 | if (!bnad->bar0) { | |
3512 | dev_err(&pdev->dev, "ioremap for bar0 failed\n"); | |
8b230ed8 RM |
3513 | return -ENOMEM; |
3514 | } | |
ecc46789 IV |
3515 | dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0, |
3516 | (unsigned long long) bnad->mmio_len); | |
8b230ed8 RM |
3517 | |
3518 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3519 | if (!bnad_msix_disable) | |
3520 | bnad->cfg_flags = BNAD_CF_MSIX; | |
3521 | ||
3522 | bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; | |
3523 | ||
3524 | bnad_q_num_init(bnad); | |
3525 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3526 | ||
3527 | bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) + | |
3528 | (bnad->num_rx * bnad->num_rxp_per_rx) + | |
3529 | BNAD_MAILBOX_MSIX_VECTORS; | |
8b230ed8 RM |
3530 | |
3531 | bnad->txq_depth = BNAD_TXQ_DEPTH; | |
3532 | bnad->rxq_depth = BNAD_RXQ_DEPTH; | |
8b230ed8 RM |
3533 | |
3534 | bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; | |
3535 | bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; | |
3536 | ||
01b54b14 JH |
3537 | sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); |
3538 | bnad->work_q = create_singlethread_workqueue(bnad->wq_name); | |
ba21fc69 WY |
3539 | if (!bnad->work_q) { |
3540 | iounmap(bnad->bar0); | |
01b54b14 | 3541 | return -ENOMEM; |
ba21fc69 | 3542 | } |
01b54b14 | 3543 | |
8b230ed8 RM |
3544 | return 0; |
3545 | } | |
3546 | ||
3547 | /* | |
3548 | * Must be called after bnad_pci_uninit() | |
3549 | * so that iounmap() and pci_set_drvdata(NULL) | |
3550 | * happen only after PCI uninitialization. | |
3551 | */ | |
3552 | static void | |
3553 | bnad_uninit(struct bnad *bnad) | |
3554 | { | |
01b54b14 JH |
3555 | if (bnad->work_q) { |
3556 | flush_workqueue(bnad->work_q); | |
3557 | destroy_workqueue(bnad->work_q); | |
3558 | bnad->work_q = NULL; | |
3559 | } | |
3560 | ||
8b230ed8 RM |
3561 | if (bnad->bar0) |
3562 | iounmap(bnad->bar0); | |
8b230ed8 RM |
3563 | } |
3564 | ||
3565 | /* | |
3566 | * Initialize locks | |
078086f3 | 3567 | a) Per ioceth mutex used for serializing configuration
8b230ed8 RM |
3568 | changes from OS interface |
3569 | b) spin lock used to protect bna state machine | |
3570 | */ | |
3571 | static void | |
3572 | bnad_lock_init(struct bnad *bnad) | |
3573 | { | |
3574 | spin_lock_init(&bnad->bna_lock); | |
3575 | mutex_init(&bnad->conf_mutex); | |
72a9730b | 3576 | mutex_init(&bnad_list_mutex); |
8b230ed8 RM |
3577 | } |
3578 | ||
3579 | static void | |
3580 | bnad_lock_uninit(struct bnad *bnad) | |
3581 | { | |
3582 | mutex_destroy(&bnad->conf_mutex); | |
72a9730b | 3583 | mutex_destroy(&bnad_list_mutex); |
8b230ed8 RM |
3584 | } |
3585 | ||
3586 | /* PCI Initialization */ | |
3587 | static int | |
3588 | bnad_pci_init(struct bnad *bnad, | |
3589 | struct pci_dev *pdev, bool *using_dac) | |
3590 | { | |
3591 | int err; | |
3592 | ||
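 | /* Standard PCI bring-up: enable the device, claim its regions, then | |
 | * prefer a 64-bit DMA mask and fall back to 32-bit; *using_dac records | |
 | * whether the 64-bit mask was granted. | |
 | */ | |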
3593 | err = pci_enable_device(pdev); | |
3594 | if (err) | |
3595 | return err; | |
3596 | err = pci_request_regions(pdev, BNAD_NAME); | |
3597 | if (err) | |
3598 | goto disable_device; | |
3e548079 | 3599 | if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { |
3db1cd5c | 3600 | *using_dac = true; |
8b230ed8 | 3601 | } else { |
3e548079 RK |
3602 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); |
3603 | if (err) | |
3604 | goto release_regions; | |
3db1cd5c | 3605 | *using_dac = false; |
8b230ed8 RM |
3606 | } |
3607 | pci_set_master(pdev); | |
3608 | return 0; | |
3609 | ||
3610 | release_regions: | |
3611 | pci_release_regions(pdev); | |
3612 | disable_device: | |
3613 | pci_disable_device(pdev); | |
3614 | ||
3615 | return err; | |
3616 | } | |
3617 | ||
3618 | static void | |
3619 | bnad_pci_uninit(struct pci_dev *pdev) | |
3620 | { | |
3621 | pci_release_regions(pdev); | |
3622 | pci_disable_device(pdev); | |
3623 | } | |
3624 | ||
c4eef189 | 3625 | static int |
8b230ed8 RM |
3626 | bnad_pci_probe(struct pci_dev *pdev, |
3627 | const struct pci_device_id *pcidev_id) | |
3628 | { | |
3caa1e95 | 3629 | bool using_dac; |
0120b99c | 3630 | int err; |
8b230ed8 RM |
3631 | struct bnad *bnad; |
3632 | struct bna *bna; | |
3633 | struct net_device *netdev; | |
3634 | struct bfa_pcidev pcidev_info; | |
3635 | unsigned long flags; | |
3636 | ||
8b230ed8 RM |
3637 | mutex_lock(&bnad_fwimg_mutex); |
3638 | if (!cna_get_firmware_buf(pdev)) { | |
3639 | mutex_unlock(&bnad_fwimg_mutex); | |
ecc46789 | 3640 | dev_err(&pdev->dev, "failed to load firmware image!\n"); |
8b230ed8 RM |
3641 | return -ENODEV; |
3642 | } | |
3643 | mutex_unlock(&bnad_fwimg_mutex); | |
3644 | ||
3645 | /* | |
3646 | * Allocates sizeof(struct net_device) + sizeof(struct bnad); | |
3647 | * bnad is reachable via netdev_priv(netdev) | |
3648 | */ | |
3649 | netdev = alloc_etherdev(sizeof(struct bnad)); | |
3650 | if (!netdev) { | |
8b230ed8 RM |
3651 | err = -ENOMEM; |
3652 | return err; | |
3653 | } | |
3654 | bnad = netdev_priv(netdev); | |
078086f3 | 3655 | bnad_lock_init(bnad); |
72a9730b | 3656 | bnad_add_to_list(bnad); |
078086f3 RM |
3657 | |
3658 | mutex_lock(&bnad->conf_mutex); | |
8b230ed8 RM |
3659 | /* |
3660 | * PCI initialization | |
0120b99c | 3661 | * Output : using_dac = true for 64-bit DMA | |
be7fa326 | 3662 | * = false for 32-bit DMA | |
8b230ed8 | 3663 | */ |
e905ed57 | 3664 | using_dac = false; |
8b230ed8 RM |
3665 | err = bnad_pci_init(bnad, pdev, &using_dac); |
3666 | if (err) | |
44861f44 | 3667 | goto unlock_mutex; |
8b230ed8 | 3668 | |
8b230ed8 RM |
3669 | /* |
3670 | * Initialize bnad structure | |
3671 | * Setup relation between pci_dev & netdev | |
8b230ed8 RM |
3672 | */ |
3673 | err = bnad_init(bnad, pdev, netdev); | |
3674 | if (err) | |
3675 | goto pci_uninit; | |
078086f3 | 3676 | |
8b230ed8 RM |
3677 | /* Initialize netdev structure, set up ethtool ops */ |
3678 | bnad_netdev_init(bnad, using_dac); | |
3679 | ||
815f41e7 RM |
3680 | /* Set link to down state */ |
3681 | netif_carrier_off(netdev); | |
3682 | ||
7afc5dbd KG |
3683 | /* Set up the debugfs node for this bnad */ | |
3684 | if (bna_debugfs_enable) | |
3685 | bnad_debugfs_init(bnad); | |
3686 | ||
8b230ed8 | 3687 | /* Get resource requirement from bna */
078086f3 | 3688 | spin_lock_irqsave(&bnad->bna_lock, flags); |
8b230ed8 | 3689 | bna_res_req(&bnad->res_info[0]); |
078086f3 | 3690 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
8b230ed8 RM |
3691 | |
3692 | /* Allocate resources from bna */ | |
078086f3 | 3693 | err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX); |
8b230ed8 | 3694 | if (err) |
078086f3 | 3695 | goto drv_uninit; |
8b230ed8 RM |
3696 | |
3697 | bna = &bnad->bna; | |
3698 | ||
3699 | /* Setup pcidev_info for bna_init() */ | |
3700 | pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn); | |
3701 | pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn); | |
3702 | pcidev_info.device_id = bnad->pcidev->device; | |
3703 | pcidev_info.pci_bar_kva = bnad->bar0; | |
3704 | ||
8b230ed8 RM |
3705 | spin_lock_irqsave(&bnad->bna_lock, flags); |
3706 | bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]); | |
8b230ed8 RM |
3707 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
3708 | ||
3709 | bnad->stats.bna_stats = &bna->stats; | |
3710 | ||
078086f3 RM |
3711 | bnad_enable_msix(bnad); |
3712 | err = bnad_mbox_irq_alloc(bnad); | |
3713 | if (err) | |
3714 | goto res_free; | |
3715 | ||
8b230ed8 | 3716 | /* Set up timers */ |
078086f3 | 3717 | setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, |
ebb56d37 | 3718 | (unsigned long)bnad); |
078086f3 | 3719 | setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, |
ebb56d37 | 3720 | (unsigned long)bnad); |
078086f3 | 3721 | setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, |
ebb56d37 | 3722 | (unsigned long)bnad); |
078086f3 | 3723 | setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, |
ebb56d37 | 3724 | (unsigned long)bnad); |
8b230ed8 | 3725 | |
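 | /* These four timers drive the IOC state machine: the IOC init | |
 | * timeout, the heartbeat check, the IOCPF state timeout, and the | |
 | * hardware semaphore retry poll. | |
 | */ | |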
8b230ed8 RM |
3726 | /* |
3727 | * Start the chip | |
078086f3 RM |
3728 | * If the callback comes back with an error, we bail out. | |
3729 | * This is a catastrophic error. | |
8b230ed8 | 3730 | */ |
078086f3 RM |
3731 | err = bnad_ioceth_enable(bnad); |
3732 | if (err) { | |
ecc46789 | 3733 | dev_err(&pdev->dev, "initialization failed err=%d\n", err); |
078086f3 RM |
3734 | goto probe_success; |
3735 | } | |
3736 | ||
3737 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3738 | if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) || | |
3739 | bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) { | |
3740 | bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1, | |
3741 | bna_attr(bna)->num_rxp - 1); | |
3742 | if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) || | |
3743 | bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) | |
3744 | err = -EIO; | |
3745 | } | |
3caa1e95 RM |
3746 | spin_unlock_irqrestore(&bnad->bna_lock, flags); |
3747 | if (err) | |
3748 | goto disable_ioceth; | |
3749 | ||
3750 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
078086f3 RM |
3751 | bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]); |
3752 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3753 | ||
3754 | err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); | |
0caa9aae RM |
3755 | if (err) { |
3756 | err = -EIO; | |
078086f3 | 3757 | goto disable_ioceth; |
0caa9aae | 3758 | } |
078086f3 RM |
3759 | |
3760 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
3761 | bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]); | |
3762 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
8b230ed8 RM |
3763 | |
3764 | /* Get the burnt-in mac */ | |
3765 | spin_lock_irqsave(&bnad->bna_lock, flags); | |
d6b30598 | 3766 | bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr); |
8b230ed8 RM |
3767 | bnad_set_netdev_perm_addr(bnad); |
3768 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
3769 | ||
0caa9aae RM |
3770 | mutex_unlock(&bnad->conf_mutex); |
3771 | ||
8b230ed8 RM |
3772 | /* Finally, register with the net_device layer */ | |
3773 | err = register_netdev(netdev); | |
3774 | if (err) { | |
ecc46789 | 3775 | dev_err(&pdev->dev, "registering net device failed\n"); |
078086f3 | 3776 | goto probe_uninit; |
8b230ed8 | 3777 | } |
078086f3 | 3778 | set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags); |
8b230ed8 | 3779 | |
0caa9aae RM |
3780 | return 0; |
3781 | ||
078086f3 RM |
3782 | probe_success: |
3783 | mutex_unlock(&bnad->conf_mutex); | |
8b230ed8 RM |
3784 | return 0; |
3785 | ||
078086f3 | 3786 | probe_uninit: |
3fc72370 | 3787 | mutex_lock(&bnad->conf_mutex); |
078086f3 RM |
3788 | bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); |
3789 | disable_ioceth: | |
3790 | bnad_ioceth_disable(bnad); | |
3791 | del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); | |
3792 | del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); | |
3793 | del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); | |
8b230ed8 RM |
3794 | spin_lock_irqsave(&bnad->bna_lock, flags); |
3795 | bna_uninit(bna); | |
3796 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
078086f3 | 3797 | bnad_mbox_irq_free(bnad); |
8b230ed8 | 3798 | bnad_disable_msix(bnad); |
078086f3 RM |
3799 | res_free: |
3800 | bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); | |
3801 | drv_uninit: | |
7afc5dbd KG |
3802 | /* Remove the debugfs node for this bnad */ |
3803 | kfree(bnad->regdata); | |
3804 | bnad_debugfs_uninit(bnad); | |
078086f3 | 3805 | bnad_uninit(bnad); |
8b230ed8 RM |
3806 | pci_uninit: |
3807 | bnad_pci_uninit(pdev); | |
44861f44 | 3808 | unlock_mutex: |
078086f3 | 3809 | mutex_unlock(&bnad->conf_mutex); |
72a9730b | 3810 | bnad_remove_from_list(bnad); |
8b230ed8 | 3811 | bnad_lock_uninit(bnad); |
8b230ed8 RM |
3812 | free_netdev(netdev); |
3813 | return err; | |
3814 | } | |
3815 | ||
c4eef189 | 3816 | static void |
8b230ed8 RM |
3817 | bnad_pci_remove(struct pci_dev *pdev) |
3818 | { | |
3819 | struct net_device *netdev = pci_get_drvdata(pdev); | |
3820 | struct bnad *bnad; | |
3821 | struct bna *bna; | |
3822 | unsigned long flags; | |
3823 | ||
3824 | if (!netdev) | |
3825 | return; | |
3826 | ||
8b230ed8 RM |
3827 | bnad = netdev_priv(netdev); |
3828 | bna = &bnad->bna; | |
3829 | ||
078086f3 RM |
3830 | if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags)) |
3831 | unregister_netdev(netdev); | |
8b230ed8 RM |
3832 | |
3833 | mutex_lock(&bnad->conf_mutex); | |
078086f3 RM |
3834 | bnad_ioceth_disable(bnad); |
3835 | del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); | |
3836 | del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); | |
3837 | del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); | |
8b230ed8 RM |
3838 | spin_lock_irqsave(&bnad->bna_lock, flags); |
3839 | bna_uninit(bna); | |
3840 | spin_unlock_irqrestore(&bnad->bna_lock, flags); | |
8b230ed8 | 3841 | |
078086f3 RM |
3842 | bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); |
3843 | bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); | |
3844 | bnad_mbox_irq_free(bnad); | |
8b230ed8 RM |
3845 | bnad_disable_msix(bnad); |
3846 | bnad_pci_uninit(pdev); | |
078086f3 | 3847 | mutex_unlock(&bnad->conf_mutex); |
72a9730b | 3848 | bnad_remove_from_list(bnad); |
8b230ed8 | 3849 | bnad_lock_uninit(bnad); |
7afc5dbd KG |
3850 | /* Remove the debugfs node for this bnad */ |
3851 | kfree(bnad->regdata); | |
3852 | bnad_debugfs_uninit(bnad); | |
8b230ed8 RM |
3853 | bnad_uninit(bnad); |
3854 | free_netdev(netdev); | |
3855 | } | |
3856 | ||
9baa3c34 | 3857 | static const struct pci_device_id bnad_pci_id_table[] = { |
8b230ed8 RM |
3858 | { |
3859 | PCI_DEVICE(PCI_VENDOR_ID_BROCADE, | |
3860 | PCI_DEVICE_ID_BROCADE_CT), | |
3861 | .class = PCI_CLASS_NETWORK_ETHERNET << 8, | |
3862 | .class_mask = 0xffff00 | |
586b2816 RM |
3863 | }, |
3864 | { | |
3865 | PCI_DEVICE(PCI_VENDOR_ID_BROCADE, | |
3866 | BFA_PCI_DEVICE_ID_CT2), | |
3867 | .class = PCI_CLASS_NETWORK_ETHERNET << 8, | |
3868 | .class_mask = 0xffff00 | |
3869 | }, | |
3870 | {0, }, | |
8b230ed8 RM |
3871 | }; |
3872 | ||
3873 | MODULE_DEVICE_TABLE(pci, bnad_pci_id_table); | |
3874 | ||
3875 | static struct pci_driver bnad_pci_driver = { | |
3876 | .name = BNAD_NAME, | |
3877 | .id_table = bnad_pci_id_table, | |
3878 | .probe = bnad_pci_probe, | |
c4eef189 | 3879 | .remove = bnad_pci_remove, |
8b230ed8 RM |
3880 | }; |
3881 | ||
3882 | static int __init | |
3883 | bnad_module_init(void) | |
3884 | { | |
3885 | int err; | |
3886 | ||
ecc46789 IV |
3887 | pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n", |
3888 | BNAD_VERSION); | |
8b230ed8 | 3889 | |
8a891429 | 3890 | bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover); |
8b230ed8 RM |
3891 | |
3892 | err = pci_register_driver(&bnad_pci_driver); | |
3893 | if (err < 0) { | |
ecc46789 | 3894 | pr_err("bna: PCI driver registration failed err=%d\n", err); |
8b230ed8 RM |
3895 | return err; |
3896 | } | |
3897 | ||
3898 | return 0; | |
3899 | } | |
3900 | ||
3901 | static void __exit | |
3902 | bnad_module_exit(void) | |
3903 | { | |
3904 | pci_unregister_driver(&bnad_pci_driver); | |
294ca868 | 3905 | release_firmware(bfi_fw); |
8b230ed8 RM |
3906 | } |
3907 | ||
3908 | module_init(bnad_module_init); | |
3909 | module_exit(bnad_module_exit); | |
3910 | ||
3911 | MODULE_AUTHOR("Brocade"); | |
3912 | MODULE_LICENSE("GPL"); | |
2732ba56 | 3913 | MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver"); |
8b230ed8 RM |
3914 | MODULE_VERSION(BNAD_VERSION); |
3915 | MODULE_FIRMWARE(CNA_FW_FILE_CT); | |
1bf9fd70 | 3916 | MODULE_FIRMWARE(CNA_FW_FILE_CT2); |