1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2011 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/etherdevice.h>
19 #include <linux/if_vlan.h>
22 #include <net/ip6_checksum.h>
23 #include <linux/firmware.h>
24 #include "bnx2x_cmn.h"
26 #include "bnx2x_init.h"
28 static int bnx2x_setup_irqs(struct bnx2x
*bp
);
31 * bnx2x_bz_fp - zero content of the fastpath structure.
34 * @index: fastpath index to be zeroed
36 * Makes sure the contents of the bp->fp[index].napi is kept
39 static inline void bnx2x_bz_fp(struct bnx2x
*bp
, int index
)
41 struct bnx2x_fastpath
*fp
= &bp
->fp
[index
];
42 struct napi_struct orig_napi
= fp
->napi
;
43 /* bzero bnx2x_fastpath contents */
44 memset(fp
, 0, sizeof(*fp
));
46 /* Restore the NAPI object as it has been already initialized */
51 * bnx2x_move_fp - move content of the fastpath structure.
54 * @from: source FP index
55 * @to: destination FP index
57 * Makes sure the contents of the bp->fp[to].napi is kept
60 static inline void bnx2x_move_fp(struct bnx2x
*bp
, int from
, int to
)
62 struct bnx2x_fastpath
*from_fp
= &bp
->fp
[from
];
63 struct bnx2x_fastpath
*to_fp
= &bp
->fp
[to
];
64 struct napi_struct orig_napi
= to_fp
->napi
;
65 /* Move bnx2x_fastpath contents */
66 memcpy(to_fp
, from_fp
, sizeof(*to_fp
));
69 /* Restore the NAPI object as it has been already initialized */
70 to_fp
->napi
= orig_napi
;
73 /* free skb in the packet ring at pos idx
74 * return idx of last bd freed
76 static u16
bnx2x_free_tx_pkt(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
79 struct sw_tx_bd
*tx_buf
= &fp
->tx_buf_ring
[idx
];
80 struct eth_tx_start_bd
*tx_start_bd
;
81 struct eth_tx_bd
*tx_data_bd
;
82 struct sk_buff
*skb
= tx_buf
->skb
;
83 u16 bd_idx
= TX_BD(tx_buf
->first_bd
), new_cons
;
86 /* prefetch skb end pointer to speedup dev_kfree_skb() */
89 DP(BNX2X_MSG_OFF
, "pkt_idx %d buff @(%p)->skb %p\n",
93 DP(BNX2X_MSG_OFF
, "free bd_idx %d\n", bd_idx
);
94 tx_start_bd
= &fp
->tx_desc_ring
[bd_idx
].start_bd
;
95 dma_unmap_single(&bp
->pdev
->dev
, BD_UNMAP_ADDR(tx_start_bd
),
96 BD_UNMAP_LEN(tx_start_bd
), DMA_TO_DEVICE
);
98 nbd
= le16_to_cpu(tx_start_bd
->nbd
) - 1;
99 #ifdef BNX2X_STOP_ON_ERROR
100 if ((nbd
- 1) > (MAX_SKB_FRAGS
+ 2)) {
101 BNX2X_ERR("BAD nbd!\n");
105 new_cons
= nbd
+ tx_buf
->first_bd
;
107 /* Get the next bd */
108 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
110 /* Skip a parse bd... */
112 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
114 /* ...and the TSO split header bd since they have no mapping */
115 if (tx_buf
->flags
& BNX2X_TSO_SPLIT_BD
) {
117 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
123 DP(BNX2X_MSG_OFF
, "free frag bd_idx %d\n", bd_idx
);
124 tx_data_bd
= &fp
->tx_desc_ring
[bd_idx
].reg_bd
;
125 dma_unmap_page(&bp
->pdev
->dev
, BD_UNMAP_ADDR(tx_data_bd
),
126 BD_UNMAP_LEN(tx_data_bd
), DMA_TO_DEVICE
);
128 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
134 tx_buf
->first_bd
= 0;
140 int bnx2x_tx_int(struct bnx2x_fastpath
*fp
)
142 struct bnx2x
*bp
= fp
->bp
;
143 struct netdev_queue
*txq
;
144 u16 hw_cons
, sw_cons
, bd_cons
= fp
->tx_bd_cons
;
146 #ifdef BNX2X_STOP_ON_ERROR
147 if (unlikely(bp
->panic
))
151 txq
= netdev_get_tx_queue(bp
->dev
, fp
->index
);
152 hw_cons
= le16_to_cpu(*fp
->tx_cons_sb
);
153 sw_cons
= fp
->tx_pkt_cons
;
155 while (sw_cons
!= hw_cons
) {
158 pkt_cons
= TX_BD(sw_cons
);
160 DP(NETIF_MSG_TX_DONE
, "queue[%d]: hw_cons %u sw_cons %u "
162 fp
->index
, hw_cons
, sw_cons
, pkt_cons
);
164 bd_cons
= bnx2x_free_tx_pkt(bp
, fp
, pkt_cons
);
168 fp
->tx_pkt_cons
= sw_cons
;
169 fp
->tx_bd_cons
= bd_cons
;
171 /* Need to make the tx_bd_cons update visible to start_xmit()
172 * before checking for netif_tx_queue_stopped(). Without the
173 * memory barrier, there is a small possibility that
174 * start_xmit() will miss it and cause the queue to be stopped
179 if (unlikely(netif_tx_queue_stopped(txq
))) {
180 /* Taking tx_lock() is needed to prevent reenabling the queue
181 * while it's empty. This could have happen if rx_action() gets
182 * suspended in bnx2x_tx_int() after the condition before
183 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
185 * stops the queue->sees fresh tx_bd_cons->releases the queue->
186 * sends some packets consuming the whole queue again->
190 __netif_tx_lock(txq
, smp_processor_id());
192 if ((netif_tx_queue_stopped(txq
)) &&
193 (bp
->state
== BNX2X_STATE_OPEN
) &&
194 (bnx2x_tx_avail(fp
) >= MAX_SKB_FRAGS
+ 3))
195 netif_tx_wake_queue(txq
);
197 __netif_tx_unlock(txq
);
202 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath
*fp
,
205 u16 last_max
= fp
->last_max_sge
;
207 if (SUB_S16(idx
, last_max
) > 0)
208 fp
->last_max_sge
= idx
;
211 static void bnx2x_update_sge_prod(struct bnx2x_fastpath
*fp
,
212 struct eth_fast_path_rx_cqe
*fp_cqe
)
214 struct bnx2x
*bp
= fp
->bp
;
215 u16 sge_len
= SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe
->pkt_len
) -
216 le16_to_cpu(fp_cqe
->len_on_bd
)) >>
218 u16 last_max
, last_elem
, first_elem
;
225 /* First mark all used pages */
226 for (i
= 0; i
< sge_len
; i
++)
227 SGE_MASK_CLEAR_BIT(fp
,
228 RX_SGE(le16_to_cpu(fp_cqe
->sgl_or_raw_data
.sgl
[i
])));
230 DP(NETIF_MSG_RX_STATUS
, "fp_cqe->sgl[%d] = %d\n",
231 sge_len
- 1, le16_to_cpu(fp_cqe
->sgl_or_raw_data
.sgl
[sge_len
- 1]));
233 /* Here we assume that the last SGE index is the biggest */
234 prefetch((void *)(fp
->sge_mask
));
235 bnx2x_update_last_max_sge(fp
,
236 le16_to_cpu(fp_cqe
->sgl_or_raw_data
.sgl
[sge_len
- 1]));
238 last_max
= RX_SGE(fp
->last_max_sge
);
239 last_elem
= last_max
>> RX_SGE_MASK_ELEM_SHIFT
;
240 first_elem
= RX_SGE(fp
->rx_sge_prod
) >> RX_SGE_MASK_ELEM_SHIFT
;
242 /* If ring is not full */
243 if (last_elem
+ 1 != first_elem
)
246 /* Now update the prod */
247 for (i
= first_elem
; i
!= last_elem
; i
= NEXT_SGE_MASK_ELEM(i
)) {
248 if (likely(fp
->sge_mask
[i
]))
251 fp
->sge_mask
[i
] = RX_SGE_MASK_ELEM_ONE_MASK
;
252 delta
+= RX_SGE_MASK_ELEM_SZ
;
256 fp
->rx_sge_prod
+= delta
;
257 /* clear page-end entries */
258 bnx2x_clear_sge_mask_next_elems(fp
);
261 DP(NETIF_MSG_RX_STATUS
,
262 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
263 fp
->last_max_sge
, fp
->rx_sge_prod
);
266 static void bnx2x_tpa_start(struct bnx2x_fastpath
*fp
, u16 queue
,
267 struct sk_buff
*skb
, u16 cons
, u16 prod
)
269 struct bnx2x
*bp
= fp
->bp
;
270 struct sw_rx_bd
*cons_rx_buf
= &fp
->rx_buf_ring
[cons
];
271 struct sw_rx_bd
*prod_rx_buf
= &fp
->rx_buf_ring
[prod
];
272 struct eth_rx_bd
*prod_bd
= &fp
->rx_desc_ring
[prod
];
275 /* move empty skb from pool to prod and map it */
276 prod_rx_buf
->skb
= fp
->tpa_pool
[queue
].skb
;
277 mapping
= dma_map_single(&bp
->pdev
->dev
, fp
->tpa_pool
[queue
].skb
->data
,
278 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
279 dma_unmap_addr_set(prod_rx_buf
, mapping
, mapping
);
281 /* move partial skb from cons to pool (don't unmap yet) */
282 fp
->tpa_pool
[queue
] = *cons_rx_buf
;
284 /* mark bin state as start - print error if current state != stop */
285 if (fp
->tpa_state
[queue
] != BNX2X_TPA_STOP
)
286 BNX2X_ERR("start of bin not in stop [%d]\n", queue
);
288 fp
->tpa_state
[queue
] = BNX2X_TPA_START
;
290 /* point prod_bd to new skb */
291 prod_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
292 prod_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
294 #ifdef BNX2X_STOP_ON_ERROR
295 fp
->tpa_queue_used
|= (1 << queue
);
296 #ifdef _ASM_GENERIC_INT_L64_H
297 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%lx\n",
299 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%llx\n",
305 /* Timestamp option length allowed for TPA aggregation:
307 * nop nop kind length echo val
309 #define TPA_TSTAMP_OPT_LEN 12
311 * Calculate the approximate value of the MSS for this
312 * aggregation using the first packet of it.
315 * @param parsing_flags Parsing flags from the START CQE
316 * @param len_on_bd Total length of the first packet for the
319 static inline u16
bnx2x_set_lro_mss(struct bnx2x
*bp
, u16 parsing_flags
,
322 /* TPA arrgregation won't have an IP options and TCP options
323 * other than timestamp.
325 u16 hdrs_len
= ETH_HLEN
+ sizeof(struct iphdr
) + sizeof(struct tcphdr
);
328 /* Check if there was a TCP timestamp, if there is it's will
329 * always be 12 bytes length: nop nop kind length echo val.
331 * Otherwise FW would close the aggregation.
333 if (parsing_flags
& PARSING_FLAGS_TIME_STAMP_EXIST_FLAG
)
334 hdrs_len
+= TPA_TSTAMP_OPT_LEN
;
336 return len_on_bd
- hdrs_len
;
339 static int bnx2x_fill_frag_skb(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
341 struct eth_fast_path_rx_cqe
*fp_cqe
,
342 u16 cqe_idx
, u16 parsing_flags
)
344 struct sw_rx_page
*rx_pg
, old_rx_pg
;
345 u16 len_on_bd
= le16_to_cpu(fp_cqe
->len_on_bd
);
346 u32 i
, frag_len
, frag_size
, pages
;
350 frag_size
= le16_to_cpu(fp_cqe
->pkt_len
) - len_on_bd
;
351 pages
= SGE_PAGE_ALIGN(frag_size
) >> SGE_PAGE_SHIFT
;
353 /* This is needed in order to enable forwarding support */
355 skb_shinfo(skb
)->gso_size
= bnx2x_set_lro_mss(bp
, parsing_flags
,
358 #ifdef BNX2X_STOP_ON_ERROR
359 if (pages
> min_t(u32
, 8, MAX_SKB_FRAGS
)*SGE_PAGE_SIZE
*PAGES_PER_SGE
) {
360 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
362 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
363 fp_cqe
->pkt_len
, len_on_bd
);
369 /* Run through the SGL and compose the fragmented skb */
370 for (i
= 0, j
= 0; i
< pages
; i
+= PAGES_PER_SGE
, j
++) {
372 RX_SGE(le16_to_cpu(fp_cqe
->sgl_or_raw_data
.sgl
[j
]));
374 /* FW gives the indices of the SGE as if the ring is an array
375 (meaning that "next" element will consume 2 indices) */
376 frag_len
= min(frag_size
, (u32
)(SGE_PAGE_SIZE
*PAGES_PER_SGE
));
377 rx_pg
= &fp
->rx_page_ring
[sge_idx
];
380 /* If we fail to allocate a substitute page, we simply stop
381 where we are and drop the whole packet */
382 err
= bnx2x_alloc_rx_sge(bp
, fp
, sge_idx
);
384 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
388 /* Unmap the page as we r going to pass it to the stack */
389 dma_unmap_page(&bp
->pdev
->dev
,
390 dma_unmap_addr(&old_rx_pg
, mapping
),
391 SGE_PAGE_SIZE
*PAGES_PER_SGE
, DMA_FROM_DEVICE
);
393 /* Add one frag and update the appropriate fields in the skb */
394 skb_fill_page_desc(skb
, j
, old_rx_pg
.page
, 0, frag_len
);
396 skb
->data_len
+= frag_len
;
397 skb
->truesize
+= frag_len
;
398 skb
->len
+= frag_len
;
400 frag_size
-= frag_len
;
406 static void bnx2x_tpa_stop(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
407 u16 queue
, int pad
, int len
, union eth_rx_cqe
*cqe
,
410 struct sw_rx_bd
*rx_buf
= &fp
->tpa_pool
[queue
];
411 struct sk_buff
*skb
= rx_buf
->skb
;
413 struct sk_buff
*new_skb
= netdev_alloc_skb(bp
->dev
, fp
->rx_buf_size
);
415 /* Unmap skb in the pool anyway, as we are going to change
416 pool entry status to BNX2X_TPA_STOP even if new skb allocation
418 dma_unmap_single(&bp
->pdev
->dev
, dma_unmap_addr(rx_buf
, mapping
),
419 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
421 if (likely(new_skb
)) {
422 /* fix ip xsum and give it to the stack */
423 /* (no need to map the new skb) */
425 le16_to_cpu(cqe
->fast_path_cqe
.pars_flags
.flags
);
428 prefetch(((char *)(skb
)) + L1_CACHE_BYTES
);
430 #ifdef BNX2X_STOP_ON_ERROR
431 if (pad
+ len
> fp
->rx_buf_size
) {
432 BNX2X_ERR("skb_put is about to fail... "
433 "pad %d len %d rx_buf_size %d\n",
434 pad
, len
, fp
->rx_buf_size
);
440 skb_reserve(skb
, pad
);
443 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
444 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
449 iph
= (struct iphdr
*)skb
->data
;
451 iph
->check
= ip_fast_csum((u8
*)iph
, iph
->ihl
);
454 if (!bnx2x_fill_frag_skb(bp
, fp
, skb
,
455 &cqe
->fast_path_cqe
, cqe_idx
,
457 if (parsing_flags
& PARSING_FLAGS_VLAN
)
458 __vlan_hwaccel_put_tag(skb
,
459 le16_to_cpu(cqe
->fast_path_cqe
.
461 napi_gro_receive(&fp
->napi
, skb
);
463 DP(NETIF_MSG_RX_STATUS
, "Failed to allocate new pages"
464 " - dropping packet!\n");
469 /* put new skb in bin */
470 fp
->tpa_pool
[queue
].skb
= new_skb
;
473 /* else drop the packet and keep the buffer in the bin */
474 DP(NETIF_MSG_RX_STATUS
,
475 "Failed to allocate new skb - dropping packet!\n");
476 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
479 fp
->tpa_state
[queue
] = BNX2X_TPA_STOP
;
482 /* Set Toeplitz hash value in the skb using the value from the
483 * CQE (calculated by HW).
485 static inline void bnx2x_set_skb_rxhash(struct bnx2x
*bp
, union eth_rx_cqe
*cqe
,
488 /* Set Toeplitz hash from CQE */
489 if ((bp
->dev
->features
& NETIF_F_RXHASH
) &&
490 (cqe
->fast_path_cqe
.status_flags
&
491 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG
))
493 le32_to_cpu(cqe
->fast_path_cqe
.rss_hash_result
);
496 int bnx2x_rx_int(struct bnx2x_fastpath
*fp
, int budget
)
498 struct bnx2x
*bp
= fp
->bp
;
499 u16 bd_cons
, bd_prod
, bd_prod_fw
, comp_ring_cons
;
500 u16 hw_comp_cons
, sw_comp_cons
, sw_comp_prod
;
503 #ifdef BNX2X_STOP_ON_ERROR
504 if (unlikely(bp
->panic
))
508 /* CQ "next element" is of the size of the regular element,
509 that's why it's ok here */
510 hw_comp_cons
= le16_to_cpu(*fp
->rx_cons_sb
);
511 if ((hw_comp_cons
& MAX_RCQ_DESC_CNT
) == MAX_RCQ_DESC_CNT
)
514 bd_cons
= fp
->rx_bd_cons
;
515 bd_prod
= fp
->rx_bd_prod
;
516 bd_prod_fw
= bd_prod
;
517 sw_comp_cons
= fp
->rx_comp_cons
;
518 sw_comp_prod
= fp
->rx_comp_prod
;
520 /* Memory barrier necessary as speculative reads of the rx
521 * buffer can be ahead of the index in the status block
525 DP(NETIF_MSG_RX_STATUS
,
526 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
527 fp
->index
, hw_comp_cons
, sw_comp_cons
);
529 while (sw_comp_cons
!= hw_comp_cons
) {
530 struct sw_rx_bd
*rx_buf
= NULL
;
532 union eth_rx_cqe
*cqe
;
536 comp_ring_cons
= RCQ_BD(sw_comp_cons
);
537 bd_prod
= RX_BD(bd_prod
);
538 bd_cons
= RX_BD(bd_cons
);
540 /* Prefetch the page containing the BD descriptor
541 at producer's index. It will be needed when new skb is
543 prefetch((void *)(PAGE_ALIGN((unsigned long)
544 (&fp
->rx_desc_ring
[bd_prod
])) -
547 cqe
= &fp
->rx_comp_ring
[comp_ring_cons
];
548 cqe_fp_flags
= cqe
->fast_path_cqe
.type_error_flags
;
550 DP(NETIF_MSG_RX_STATUS
, "CQE type %x err %x status %x"
551 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags
),
552 cqe_fp_flags
, cqe
->fast_path_cqe
.status_flags
,
553 le32_to_cpu(cqe
->fast_path_cqe
.rss_hash_result
),
554 le16_to_cpu(cqe
->fast_path_cqe
.vlan_tag
),
555 le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
));
557 /* is this a slowpath msg? */
558 if (unlikely(CQE_TYPE(cqe_fp_flags
))) {
559 bnx2x_sp_event(fp
, cqe
);
562 /* this is an rx packet */
564 rx_buf
= &fp
->rx_buf_ring
[bd_cons
];
567 len
= le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
);
568 pad
= cqe
->fast_path_cqe
.placement_offset
;
570 /* - If CQE is marked both TPA_START and TPA_END it is
572 * - FP CQE will always have either TPA_START or/and
573 * TPA_STOP flags set.
575 if ((!fp
->disable_tpa
) &&
576 (TPA_TYPE(cqe_fp_flags
) !=
577 (TPA_TYPE_START
| TPA_TYPE_END
))) {
578 u16 queue
= cqe
->fast_path_cqe
.queue_index
;
580 if (TPA_TYPE(cqe_fp_flags
) == TPA_TYPE_START
) {
581 DP(NETIF_MSG_RX_STATUS
,
582 "calling tpa_start on queue %d\n",
585 bnx2x_tpa_start(fp
, queue
, skb
,
588 /* Set Toeplitz hash for an LRO skb */
589 bnx2x_set_skb_rxhash(bp
, cqe
, skb
);
592 } else { /* TPA_STOP */
593 DP(NETIF_MSG_RX_STATUS
,
594 "calling tpa_stop on queue %d\n",
597 if (!BNX2X_RX_SUM_FIX(cqe
))
598 BNX2X_ERR("STOP on none TCP "
601 /* This is a size of the linear data
603 len
= le16_to_cpu(cqe
->fast_path_cqe
.
605 bnx2x_tpa_stop(bp
, fp
, queue
, pad
,
606 len
, cqe
, comp_ring_cons
);
607 #ifdef BNX2X_STOP_ON_ERROR
612 bnx2x_update_sge_prod(fp
,
613 &cqe
->fast_path_cqe
);
618 dma_sync_single_for_device(&bp
->pdev
->dev
,
619 dma_unmap_addr(rx_buf
, mapping
),
620 pad
+ RX_COPY_THRESH
,
622 prefetch(((char *)(skb
)) + L1_CACHE_BYTES
);
624 /* is this an error packet? */
625 if (unlikely(cqe_fp_flags
& ETH_RX_ERROR_FALGS
)) {
627 "ERROR flags %x rx packet %u\n",
628 cqe_fp_flags
, sw_comp_cons
);
629 fp
->eth_q_stats
.rx_err_discard_pkt
++;
633 /* Since we don't have a jumbo ring
634 * copy small packets if mtu > 1500
636 if ((bp
->dev
->mtu
> ETH_MAX_PACKET_SIZE
) &&
637 (len
<= RX_COPY_THRESH
)) {
638 struct sk_buff
*new_skb
;
640 new_skb
= netdev_alloc_skb(bp
->dev
,
642 if (new_skb
== NULL
) {
644 "ERROR packet dropped "
645 "because of alloc failure\n");
646 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
651 skb_copy_from_linear_data_offset(skb
, pad
,
652 new_skb
->data
+ pad
, len
);
653 skb_reserve(new_skb
, pad
);
654 skb_put(new_skb
, len
);
656 bnx2x_reuse_rx_skb(fp
, bd_cons
, bd_prod
);
661 if (likely(bnx2x_alloc_rx_skb(bp
, fp
, bd_prod
) == 0)) {
662 dma_unmap_single(&bp
->pdev
->dev
,
663 dma_unmap_addr(rx_buf
, mapping
),
666 skb_reserve(skb
, pad
);
671 "ERROR packet dropped because "
672 "of alloc failure\n");
673 fp
->eth_q_stats
.rx_skb_alloc_failed
++;
675 bnx2x_reuse_rx_skb(fp
, bd_cons
, bd_prod
);
679 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
681 /* Set Toeplitz hash for a none-LRO skb */
682 bnx2x_set_skb_rxhash(bp
, cqe
, skb
);
684 skb_checksum_none_assert(skb
);
686 if (bp
->dev
->features
& NETIF_F_RXCSUM
) {
687 if (likely(BNX2X_RX_CSUM_OK(cqe
)))
688 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
690 fp
->eth_q_stats
.hw_csum_err
++;
694 skb_record_rx_queue(skb
, fp
->index
);
696 if (le16_to_cpu(cqe
->fast_path_cqe
.pars_flags
.flags
) &
698 __vlan_hwaccel_put_tag(skb
,
699 le16_to_cpu(cqe
->fast_path_cqe
.vlan_tag
));
700 napi_gro_receive(&fp
->napi
, skb
);
706 bd_cons
= NEXT_RX_IDX(bd_cons
);
707 bd_prod
= NEXT_RX_IDX(bd_prod
);
708 bd_prod_fw
= NEXT_RX_IDX(bd_prod_fw
);
711 sw_comp_prod
= NEXT_RCQ_IDX(sw_comp_prod
);
712 sw_comp_cons
= NEXT_RCQ_IDX(sw_comp_cons
);
714 if (rx_pkt
== budget
)
718 fp
->rx_bd_cons
= bd_cons
;
719 fp
->rx_bd_prod
= bd_prod_fw
;
720 fp
->rx_comp_cons
= sw_comp_cons
;
721 fp
->rx_comp_prod
= sw_comp_prod
;
723 /* Update producers */
724 bnx2x_update_rx_prod(bp
, fp
, bd_prod_fw
, sw_comp_prod
,
727 fp
->rx_pkt
+= rx_pkt
;
733 static irqreturn_t
bnx2x_msix_fp_int(int irq
, void *fp_cookie
)
735 struct bnx2x_fastpath
*fp
= fp_cookie
;
736 struct bnx2x
*bp
= fp
->bp
;
738 /* Return here if interrupt is disabled */
739 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
740 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
744 DP(BNX2X_MSG_FP
, "got an MSI-X interrupt on IDX:SB "
745 "[fp %d fw_sd %d igusb %d]\n",
746 fp
->index
, fp
->fw_sb_id
, fp
->igu_sb_id
);
747 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
, 0, IGU_INT_DISABLE
, 0);
749 #ifdef BNX2X_STOP_ON_ERROR
750 if (unlikely(bp
->panic
))
754 /* Handle Rx and Tx according to MSI-X vector */
755 prefetch(fp
->rx_cons_sb
);
756 prefetch(fp
->tx_cons_sb
);
757 prefetch(&fp
->sb_running_index
[SM_RX_ID
]);
758 napi_schedule(&bnx2x_fp(bp
, fp
->index
, napi
));
763 /* HW Lock for shared dual port PHYs */
764 void bnx2x_acquire_phy_lock(struct bnx2x
*bp
)
766 mutex_lock(&bp
->port
.phy_mutex
);
768 if (bp
->port
.need_hw_lock
)
769 bnx2x_acquire_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
772 void bnx2x_release_phy_lock(struct bnx2x
*bp
)
774 if (bp
->port
.need_hw_lock
)
775 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_MDIO
);
777 mutex_unlock(&bp
->port
.phy_mutex
);
780 /* calculates MF speed according to current linespeed and MF configuration */
781 u16
bnx2x_get_mf_speed(struct bnx2x
*bp
)
783 u16 line_speed
= bp
->link_vars
.line_speed
;
785 u16 maxCfg
= bnx2x_extract_max_cfg(bp
,
786 bp
->mf_config
[BP_VN(bp
)]);
788 /* Calculate the current MAX line speed limit for the MF
792 line_speed
= (line_speed
* maxCfg
) / 100;
794 u16 vn_max_rate
= maxCfg
* 100;
796 if (vn_max_rate
< line_speed
)
797 line_speed
= vn_max_rate
;
805 * bnx2x_fill_report_data - fill link report data to report
808 * @data: link state to update
810 * It uses a none-atomic bit operations because is called under the mutex.
812 static inline void bnx2x_fill_report_data(struct bnx2x
*bp
,
813 struct bnx2x_link_report_data
*data
)
815 u16 line_speed
= bnx2x_get_mf_speed(bp
);
817 memset(data
, 0, sizeof(*data
));
819 /* Fill the report data: efective line speed */
820 data
->line_speed
= line_speed
;
823 if (!bp
->link_vars
.link_up
|| (bp
->flags
& MF_FUNC_DIS
))
824 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
825 &data
->link_report_flags
);
828 if (bp
->link_vars
.duplex
== DUPLEX_FULL
)
829 __set_bit(BNX2X_LINK_REPORT_FD
, &data
->link_report_flags
);
831 /* Rx Flow Control is ON */
832 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_RX
)
833 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON
, &data
->link_report_flags
);
835 /* Tx Flow Control is ON */
836 if (bp
->link_vars
.flow_ctrl
& BNX2X_FLOW_CTRL_TX
)
837 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON
, &data
->link_report_flags
);
841 * bnx2x_link_report - report link status to OS.
845 * Calls the __bnx2x_link_report() under the same locking scheme
846 * as a link/PHY state managing code to ensure a consistent link
850 void bnx2x_link_report(struct bnx2x
*bp
)
852 bnx2x_acquire_phy_lock(bp
);
853 __bnx2x_link_report(bp
);
854 bnx2x_release_phy_lock(bp
);
858 * __bnx2x_link_report - report link status to OS.
862 * None atomic inmlementation.
863 * Should be called under the phy_lock.
865 void __bnx2x_link_report(struct bnx2x
*bp
)
867 struct bnx2x_link_report_data cur_data
;
871 bnx2x_read_mf_cfg(bp
);
873 /* Read the current link report info */
874 bnx2x_fill_report_data(bp
, &cur_data
);
876 /* Don't report link down or exactly the same link status twice */
877 if (!memcmp(&cur_data
, &bp
->last_reported_link
, sizeof(cur_data
)) ||
878 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
879 &bp
->last_reported_link
.link_report_flags
) &&
880 test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
881 &cur_data
.link_report_flags
)))
886 /* We are going to report a new link parameters now -
887 * remember the current data for the next time.
889 memcpy(&bp
->last_reported_link
, &cur_data
, sizeof(cur_data
));
891 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
892 &cur_data
.link_report_flags
)) {
893 netif_carrier_off(bp
->dev
);
894 netdev_err(bp
->dev
, "NIC Link is Down\n");
897 netif_carrier_on(bp
->dev
);
898 netdev_info(bp
->dev
, "NIC Link is Up, ");
899 pr_cont("%d Mbps ", cur_data
.line_speed
);
901 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD
,
902 &cur_data
.link_report_flags
))
903 pr_cont("full duplex");
905 pr_cont("half duplex");
907 /* Handle the FC at the end so that only these flags would be
908 * possibly set. This way we may easily check if there is no FC
911 if (cur_data
.link_report_flags
) {
912 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON
,
913 &cur_data
.link_report_flags
)) {
914 pr_cont(", receive ");
915 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON
,
916 &cur_data
.link_report_flags
))
917 pr_cont("& transmit ");
919 pr_cont(", transmit ");
921 pr_cont("flow control ON");
927 void bnx2x_init_rx_rings(struct bnx2x
*bp
)
929 int func
= BP_FUNC(bp
);
930 int max_agg_queues
= CHIP_IS_E1(bp
) ? ETH_MAX_AGGREGATION_QUEUES_E1
:
931 ETH_MAX_AGGREGATION_QUEUES_E1H
;
935 /* Allocate TPA resources */
936 for_each_rx_queue(bp
, j
) {
937 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
940 "mtu %d rx_buf_size %d\n", bp
->dev
->mtu
, fp
->rx_buf_size
);
942 if (!fp
->disable_tpa
) {
943 /* Fill the per-aggregation pool */
944 for (i
= 0; i
< max_agg_queues
; i
++) {
945 fp
->tpa_pool
[i
].skb
=
946 netdev_alloc_skb(bp
->dev
, fp
->rx_buf_size
);
947 if (!fp
->tpa_pool
[i
].skb
) {
948 BNX2X_ERR("Failed to allocate TPA "
949 "skb pool for queue[%d] - "
950 "disabling TPA on this "
952 bnx2x_free_tpa_pool(bp
, fp
, i
);
956 dma_unmap_addr_set((struct sw_rx_bd
*)
957 &bp
->fp
->tpa_pool
[i
],
959 fp
->tpa_state
[i
] = BNX2X_TPA_STOP
;
962 /* "next page" elements initialization */
963 bnx2x_set_next_page_sgl(fp
);
965 /* set SGEs bit mask */
966 bnx2x_init_sge_ring_bit_mask(fp
);
968 /* Allocate SGEs and initialize the ring elements */
969 for (i
= 0, ring_prod
= 0;
970 i
< MAX_RX_SGE_CNT
*NUM_RX_SGE_PAGES
; i
++) {
972 if (bnx2x_alloc_rx_sge(bp
, fp
, ring_prod
) < 0) {
973 BNX2X_ERR("was only able to allocate "
975 BNX2X_ERR("disabling TPA for"
977 /* Cleanup already allocated elements */
978 bnx2x_free_rx_sge_range(bp
,
980 bnx2x_free_tpa_pool(bp
,
986 ring_prod
= NEXT_SGE_IDX(ring_prod
);
989 fp
->rx_sge_prod
= ring_prod
;
993 for_each_rx_queue(bp
, j
) {
994 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
998 /* Activate BD ring */
1000 * this will generate an interrupt (to the TSTORM)
1001 * must only be done after chip is initialized
1003 bnx2x_update_rx_prod(bp
, fp
, fp
->rx_bd_prod
, fp
->rx_comp_prod
,
1009 if (!CHIP_IS_E2(bp
)) {
1010 REG_WR(bp
, BAR_USTRORM_INTMEM
+
1011 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
),
1012 U64_LO(fp
->rx_comp_mapping
));
1013 REG_WR(bp
, BAR_USTRORM_INTMEM
+
1014 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
) + 4,
1015 U64_HI(fp
->rx_comp_mapping
));
1020 static void bnx2x_free_tx_skbs(struct bnx2x
*bp
)
1024 for_each_tx_queue(bp
, i
) {
1025 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1027 u16 bd_cons
= fp
->tx_bd_cons
;
1028 u16 sw_prod
= fp
->tx_pkt_prod
;
1029 u16 sw_cons
= fp
->tx_pkt_cons
;
1031 while (sw_cons
!= sw_prod
) {
1032 bd_cons
= bnx2x_free_tx_pkt(bp
, fp
, TX_BD(sw_cons
));
1038 static void bnx2x_free_rx_bds(struct bnx2x_fastpath
*fp
)
1040 struct bnx2x
*bp
= fp
->bp
;
1043 /* ring wasn't allocated */
1044 if (fp
->rx_buf_ring
== NULL
)
1047 for (i
= 0; i
< NUM_RX_BD
; i
++) {
1048 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[i
];
1049 struct sk_buff
*skb
= rx_buf
->skb
;
1054 dma_unmap_single(&bp
->pdev
->dev
,
1055 dma_unmap_addr(rx_buf
, mapping
),
1056 fp
->rx_buf_size
, DMA_FROM_DEVICE
);
1063 static void bnx2x_free_rx_skbs(struct bnx2x
*bp
)
1067 for_each_rx_queue(bp
, j
) {
1068 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
1070 bnx2x_free_rx_bds(fp
);
1072 if (!fp
->disable_tpa
)
1073 bnx2x_free_tpa_pool(bp
, fp
, CHIP_IS_E1(bp
) ?
1074 ETH_MAX_AGGREGATION_QUEUES_E1
:
1075 ETH_MAX_AGGREGATION_QUEUES_E1H
);
1079 void bnx2x_free_skbs(struct bnx2x
*bp
)
1081 bnx2x_free_tx_skbs(bp
);
1082 bnx2x_free_rx_skbs(bp
);
1085 void bnx2x_update_max_mf_config(struct bnx2x
*bp
, u32 value
)
1087 /* load old values */
1088 u32 mf_cfg
= bp
->mf_config
[BP_VN(bp
)];
1090 if (value
!= bnx2x_extract_max_cfg(bp
, mf_cfg
)) {
1091 /* leave all but MAX value */
1092 mf_cfg
&= ~FUNC_MF_CFG_MAX_BW_MASK
;
1094 /* set new MAX value */
1095 mf_cfg
|= (value
<< FUNC_MF_CFG_MAX_BW_SHIFT
)
1096 & FUNC_MF_CFG_MAX_BW_MASK
;
1098 bnx2x_fw_command(bp
, DRV_MSG_CODE_SET_MF_BW
, mf_cfg
);
1102 static void bnx2x_free_msix_irqs(struct bnx2x
*bp
)
1106 free_irq(bp
->msix_table
[0].vector
, bp
->dev
);
1107 DP(NETIF_MSG_IFDOWN
, "released sp irq (%d)\n",
1108 bp
->msix_table
[0].vector
);
1113 for_each_eth_queue(bp
, i
) {
1114 DP(NETIF_MSG_IFDOWN
, "about to release fp #%d->%d irq "
1115 "state %x\n", i
, bp
->msix_table
[i
+ offset
].vector
,
1116 bnx2x_fp(bp
, i
, state
));
1118 free_irq(bp
->msix_table
[i
+ offset
].vector
, &bp
->fp
[i
]);
1122 void bnx2x_free_irq(struct bnx2x
*bp
)
1124 if (bp
->flags
& USING_MSIX_FLAG
)
1125 bnx2x_free_msix_irqs(bp
);
1126 else if (bp
->flags
& USING_MSI_FLAG
)
1127 free_irq(bp
->pdev
->irq
, bp
->dev
);
1129 free_irq(bp
->pdev
->irq
, bp
->dev
);
1132 int bnx2x_enable_msix(struct bnx2x
*bp
)
1134 int msix_vec
= 0, i
, rc
, req_cnt
;
1136 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1137 DP(NETIF_MSG_IFUP
, "msix_table[0].entry = %d (slowpath)\n",
1138 bp
->msix_table
[0].entry
);
1142 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1143 DP(NETIF_MSG_IFUP
, "msix_table[%d].entry = %d (CNIC)\n",
1144 bp
->msix_table
[msix_vec
].entry
, bp
->msix_table
[msix_vec
].entry
);
1147 for_each_eth_queue(bp
, i
) {
1148 bp
->msix_table
[msix_vec
].entry
= msix_vec
;
1149 DP(NETIF_MSG_IFUP
, "msix_table[%d].entry = %d "
1150 "(fastpath #%u)\n", msix_vec
, msix_vec
, i
);
1154 req_cnt
= BNX2X_NUM_ETH_QUEUES(bp
) + CNIC_CONTEXT_USE
+ 1;
1156 rc
= pci_enable_msix(bp
->pdev
, &bp
->msix_table
[0], req_cnt
);
1159 * reconfigure number of tx/rx queues according to available
1162 if (rc
>= BNX2X_MIN_MSIX_VEC_CNT
) {
1163 /* how less vectors we will have? */
1164 int diff
= req_cnt
- rc
;
1167 "Trying to use less MSI-X vectors: %d\n", rc
);
1169 rc
= pci_enable_msix(bp
->pdev
, &bp
->msix_table
[0], rc
);
1173 "MSI-X is not attainable rc %d\n", rc
);
1177 * decrease number of queues by number of unallocated entries
1179 bp
->num_queues
-= diff
;
1181 DP(NETIF_MSG_IFUP
, "New queue configuration set: %d\n",
1184 /* fall to INTx if not enough memory */
1186 bp
->flags
|= DISABLE_MSI_FLAG
;
1187 DP(NETIF_MSG_IFUP
, "MSI-X is not attainable rc %d\n", rc
);
1191 bp
->flags
|= USING_MSIX_FLAG
;
1196 static int bnx2x_req_msix_irqs(struct bnx2x
*bp
)
1198 int i
, rc
, offset
= 1;
1200 rc
= request_irq(bp
->msix_table
[0].vector
, bnx2x_msix_sp_int
, 0,
1201 bp
->dev
->name
, bp
->dev
);
1203 BNX2X_ERR("request sp irq failed\n");
1210 for_each_eth_queue(bp
, i
) {
1211 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1212 snprintf(fp
->name
, sizeof(fp
->name
), "%s-fp-%d",
1215 rc
= request_irq(bp
->msix_table
[offset
].vector
,
1216 bnx2x_msix_fp_int
, 0, fp
->name
, fp
);
1218 BNX2X_ERR("request fp #%d irq failed rc %d\n", i
, rc
);
1219 bnx2x_free_msix_irqs(bp
);
1224 fp
->state
= BNX2X_FP_STATE_IRQ
;
1227 i
= BNX2X_NUM_ETH_QUEUES(bp
);
1228 offset
= 1 + CNIC_CONTEXT_USE
;
1229 netdev_info(bp
->dev
, "using MSI-X IRQs: sp %d fp[%d] %d"
1231 bp
->msix_table
[0].vector
,
1232 0, bp
->msix_table
[offset
].vector
,
1233 i
- 1, bp
->msix_table
[offset
+ i
- 1].vector
);
1238 int bnx2x_enable_msi(struct bnx2x
*bp
)
1242 rc
= pci_enable_msi(bp
->pdev
);
1244 DP(NETIF_MSG_IFUP
, "MSI is not attainable\n");
1247 bp
->flags
|= USING_MSI_FLAG
;
1252 static int bnx2x_req_irq(struct bnx2x
*bp
)
1254 unsigned long flags
;
1257 if (bp
->flags
& USING_MSI_FLAG
)
1260 flags
= IRQF_SHARED
;
1262 rc
= request_irq(bp
->pdev
->irq
, bnx2x_interrupt
, flags
,
1263 bp
->dev
->name
, bp
->dev
);
1265 bnx2x_fp(bp
, 0, state
) = BNX2X_FP_STATE_IRQ
;
1270 static void bnx2x_napi_enable(struct bnx2x
*bp
)
1274 for_each_napi_queue(bp
, i
)
1275 napi_enable(&bnx2x_fp(bp
, i
, napi
));
1278 static void bnx2x_napi_disable(struct bnx2x
*bp
)
1282 for_each_napi_queue(bp
, i
)
1283 napi_disable(&bnx2x_fp(bp
, i
, napi
));
1286 void bnx2x_netif_start(struct bnx2x
*bp
)
1290 intr_sem
= atomic_dec_and_test(&bp
->intr_sem
);
1291 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1294 if (netif_running(bp
->dev
)) {
1295 bnx2x_napi_enable(bp
);
1296 bnx2x_int_enable(bp
);
1297 if (bp
->state
== BNX2X_STATE_OPEN
)
1298 netif_tx_wake_all_queues(bp
->dev
);
1303 void bnx2x_netif_stop(struct bnx2x
*bp
, int disable_hw
)
1305 bnx2x_int_disable_sync(bp
, disable_hw
);
1306 bnx2x_napi_disable(bp
);
1307 netif_tx_disable(bp
->dev
);
1310 u16
bnx2x_select_queue(struct net_device
*dev
, struct sk_buff
*skb
)
1313 struct bnx2x
*bp
= netdev_priv(dev
);
1315 return skb_tx_hash(dev
, skb
);
1317 struct ethhdr
*hdr
= (struct ethhdr
*)skb
->data
;
1318 u16 ether_type
= ntohs(hdr
->h_proto
);
1320 /* Skip VLAN tag if present */
1321 if (ether_type
== ETH_P_8021Q
) {
1322 struct vlan_ethhdr
*vhdr
=
1323 (struct vlan_ethhdr
*)skb
->data
;
1325 ether_type
= ntohs(vhdr
->h_vlan_encapsulated_proto
);
1328 /* If ethertype is FCoE or FIP - use FCoE ring */
1329 if ((ether_type
== ETH_P_FCOE
) || (ether_type
== ETH_P_FIP
))
1330 return bnx2x_fcoe(bp
, index
);
1333 /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring
1335 return __skb_tx_hash(dev
, skb
,
1336 dev
->real_num_tx_queues
- FCOE_CONTEXT_USE
);
1339 void bnx2x_set_num_queues(struct bnx2x
*bp
)
1341 switch (bp
->multi_mode
) {
1342 case ETH_RSS_MODE_DISABLED
:
1345 case ETH_RSS_MODE_REGULAR
:
1346 bp
->num_queues
= bnx2x_calc_num_queues(bp
);
1354 /* Add special queues */
1355 bp
->num_queues
+= NONE_ETH_CONTEXT_USE
;
1359 static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x
*bp
)
1363 bnx2x_set_fip_eth_mac_addr(bp
, 1);
1364 bnx2x_set_all_enode_macs(bp
, 1);
1365 bp
->flags
|= FCOE_MACS_SET
;
1370 static void bnx2x_release_firmware(struct bnx2x
*bp
)
1372 kfree(bp
->init_ops_offsets
);
1373 kfree(bp
->init_ops
);
1374 kfree(bp
->init_data
);
1375 release_firmware(bp
->firmware
);
1378 static inline int bnx2x_set_real_num_queues(struct bnx2x
*bp
)
1380 int rc
, num
= bp
->num_queues
;
1384 num
-= FCOE_CONTEXT_USE
;
1387 netif_set_real_num_tx_queues(bp
->dev
, num
);
1388 rc
= netif_set_real_num_rx_queues(bp
->dev
, num
);
1392 static inline void bnx2x_set_rx_buf_size(struct bnx2x
*bp
)
1396 for_each_queue(bp
, i
) {
1397 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
1399 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1402 * Although there are no IP frames expected to arrive to
1403 * this ring we still want to add an
1404 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1408 BNX2X_FCOE_MINI_JUMBO_MTU
+ ETH_OVREHEAD
+
1409 BNX2X_RX_ALIGN
+ IP_HEADER_ALIGNMENT_PADDING
;
1412 bp
->dev
->mtu
+ ETH_OVREHEAD
+ BNX2X_RX_ALIGN
+
1413 IP_HEADER_ALIGNMENT_PADDING
;
1417 /* must be called with rtnl_lock */
1418 int bnx2x_nic_load(struct bnx2x
*bp
, int load_mode
)
1423 /* Set init arrays */
1424 rc
= bnx2x_init_firmware(bp
);
1426 BNX2X_ERR("Error loading firmware\n");
1430 #ifdef BNX2X_STOP_ON_ERROR
1431 if (unlikely(bp
->panic
))
1435 bp
->state
= BNX2X_STATE_OPENING_WAIT4_LOAD
;
1437 /* Set the initial link reported state to link down */
1438 bnx2x_acquire_phy_lock(bp
);
1439 memset(&bp
->last_reported_link
, 0, sizeof(bp
->last_reported_link
));
1440 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN
,
1441 &bp
->last_reported_link
.link_report_flags
);
1442 bnx2x_release_phy_lock(bp
);
1444 /* must be called before memory allocation and HW init */
1445 bnx2x_ilt_set_info(bp
);
1447 /* zero fastpath structures preserving invariants like napi which are
1448 * allocated only once
1450 for_each_queue(bp
, i
)
1453 /* Set the receive queues buffer size */
1454 bnx2x_set_rx_buf_size(bp
);
1456 for_each_queue(bp
, i
)
1457 bnx2x_fp(bp
, i
, disable_tpa
) =
1458 ((bp
->flags
& TPA_ENABLE_FLAG
) == 0);
1461 /* We don't want TPA on FCoE L2 ring */
1462 bnx2x_fcoe(bp
, disable_tpa
) = 1;
1465 if (bnx2x_alloc_mem(bp
))
1468 /* As long as bnx2x_alloc_mem() may possibly update
1469 * bp->num_queues, bnx2x_set_real_num_queues() should always
1472 rc
= bnx2x_set_real_num_queues(bp
);
1474 BNX2X_ERR("Unable to set real_num_queues\n");
1478 bnx2x_napi_enable(bp
);
1480 /* Send LOAD_REQUEST command to MCP
1481 Returns the type of LOAD command:
1482 if it is the first port to be initialized
1483 common blocks should be initialized, otherwise - not
1485 if (!BP_NOMCP(bp
)) {
1486 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_REQ
, 0);
1488 BNX2X_ERR("MCP response failure, aborting\n");
1492 if (load_code
== FW_MSG_CODE_DRV_LOAD_REFUSED
) {
1493 rc
= -EBUSY
; /* other port in diagnostic mode */
1498 int path
= BP_PATH(bp
);
1499 int port
= BP_PORT(bp
);
1501 DP(NETIF_MSG_IFUP
, "NO MCP - load counts[%d] %d, %d, %d\n",
1502 path
, load_count
[path
][0], load_count
[path
][1],
1503 load_count
[path
][2]);
1504 load_count
[path
][0]++;
1505 load_count
[path
][1 + port
]++;
1506 DP(NETIF_MSG_IFUP
, "NO MCP - new load counts[%d] %d, %d, %d\n",
1507 path
, load_count
[path
][0], load_count
[path
][1],
1508 load_count
[path
][2]);
1509 if (load_count
[path
][0] == 1)
1510 load_code
= FW_MSG_CODE_DRV_LOAD_COMMON
;
1511 else if (load_count
[path
][1 + port
] == 1)
1512 load_code
= FW_MSG_CODE_DRV_LOAD_PORT
;
1514 load_code
= FW_MSG_CODE_DRV_LOAD_FUNCTION
;
1517 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
1518 (load_code
== FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
) ||
1519 (load_code
== FW_MSG_CODE_DRV_LOAD_PORT
))
1523 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
1526 rc
= bnx2x_init_hw(bp
, load_code
);
1528 BNX2X_ERR("HW init failed, aborting\n");
1529 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
1533 /* Connect to IRQs */
1534 rc
= bnx2x_setup_irqs(bp
);
1536 bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
1540 /* Setup NIC internals and enable interrupts */
1541 bnx2x_nic_init(bp
, load_code
);
1543 if (((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
1544 (load_code
== FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
)) &&
1545 (bp
->common
.shmem2_base
))
1546 SHMEM2_WR(bp
, dcc_support
,
1547 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV
|
1548 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV
));
1550 /* Send LOAD_DONE command to MCP */
1551 if (!BP_NOMCP(bp
)) {
1552 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
, 0);
1554 BNX2X_ERR("MCP response failure, aborting\n");
1560 bnx2x_dcbx_init(bp
);
1562 bp
->state
= BNX2X_STATE_OPENING_WAIT4_PORT
;
1564 rc
= bnx2x_func_start(bp
);
1566 BNX2X_ERR("Function start failed!\n");
1567 #ifndef BNX2X_STOP_ON_ERROR
1575 rc
= bnx2x_setup_client(bp
, &bp
->fp
[0], 1 /* Leading */);
1577 BNX2X_ERR("Setup leading failed!\n");
1578 #ifndef BNX2X_STOP_ON_ERROR
1586 if (!CHIP_IS_E1(bp
) &&
1587 (bp
->mf_config
[BP_VN(bp
)] & FUNC_MF_CFG_FUNC_DISABLED
)) {
1588 DP(NETIF_MSG_IFUP
, "mf_cfg function disabled\n");
1589 bp
->flags
|= MF_FUNC_DIS
;
1593 /* Enable Timer scan */
1594 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ BP_PORT(bp
)*4, 1);
1597 for_each_nondefault_queue(bp
, i
) {
1598 rc
= bnx2x_setup_client(bp
, &bp
->fp
[i
], 0);
1607 /* Now when Clients are configured we are ready to work */
1608 bp
->state
= BNX2X_STATE_OPEN
;
1611 bnx2x_set_fcoe_eth_macs(bp
);
1614 bnx2x_set_eth_mac(bp
, 1);
1616 /* Clear MC configuration */
1618 bnx2x_invalidate_e1_mc_list(bp
);
1620 bnx2x_invalidate_e1h_mc_list(bp
);
1622 /* Clear UC lists configuration */
1623 bnx2x_invalidate_uc_list(bp
);
1625 if (bp
->pending_max
) {
1626 bnx2x_update_max_mf_config(bp
, bp
->pending_max
);
1627 bp
->pending_max
= 0;
1631 bnx2x_initial_phy_init(bp
, load_mode
);
1633 /* Initialize Rx filtering */
1634 bnx2x_set_rx_mode(bp
->dev
);
1636 /* Start fast path */
1637 switch (load_mode
) {
1639 /* Tx queue should be only reenabled */
1640 netif_tx_wake_all_queues(bp
->dev
);
1641 /* Initialize the receive filter. */
1645 netif_tx_start_all_queues(bp
->dev
);
1646 smp_mb__after_clear_bit();
1650 bp
->state
= BNX2X_STATE_DIAG
;
1658 bnx2x__link_status_update(bp
);
1660 /* start the timer */
1661 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
1664 bnx2x_setup_cnic_irq_info(bp
);
1665 if (bp
->state
== BNX2X_STATE_OPEN
)
1666 bnx2x_cnic_notify(bp
, CNIC_CTL_START_CMD
);
1668 bnx2x_inc_load_cnt(bp
);
1670 bnx2x_release_firmware(bp
);
1676 /* Disable Timer scan */
1677 REG_WR(bp
, TM_REG_EN_LINEAR0_TIMER
+ BP_PORT(bp
)*4, 0);
1680 bnx2x_int_disable_sync(bp
, 1);
1682 /* Free SKBs, SGEs, TPA pool and driver internals */
1683 bnx2x_free_skbs(bp
);
1684 for_each_rx_queue(bp
, i
)
1685 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
1690 if (!BP_NOMCP(bp
)) {
1691 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
, 0);
1692 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
, 0);
1697 bnx2x_napi_disable(bp
);
1701 bnx2x_release_firmware(bp
);
1706 /* must be called with rtnl_lock */
1707 int bnx2x_nic_unload(struct bnx2x
*bp
, int unload_mode
)
1711 if (bp
->state
== BNX2X_STATE_CLOSED
) {
1712 /* Interface has been removed - nothing to recover */
1713 bp
->recovery_state
= BNX2X_RECOVERY_DONE
;
1715 bnx2x_release_hw_lock(bp
, HW_LOCK_RESOURCE_RESERVED_08
);
1722 bnx2x_cnic_notify(bp
, CNIC_CTL_STOP_CMD
);
1724 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_HALT
;
1726 /* Set "drop all" */
1727 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
1728 bnx2x_set_storm_rx_mode(bp
);
1731 bnx2x_tx_disable(bp
);
1733 del_timer_sync(&bp
->timer
);
1735 SHMEM_WR(bp
, func_mb
[BP_FW_MB_IDX(bp
)].drv_pulse_mb
,
1736 (DRV_PULSE_ALWAYS_ALIVE
| bp
->fw_drv_pulse_wr_seq
));
1738 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
1740 /* Cleanup the chip if needed */
1741 if (unload_mode
!= UNLOAD_RECOVERY
)
1742 bnx2x_chip_cleanup(bp
, unload_mode
);
1744 /* Disable HW interrupts, NAPI and Tx */
1745 bnx2x_netif_stop(bp
, 1);
1753 /* Free SKBs, SGEs, TPA pool and driver internals */
1754 bnx2x_free_skbs(bp
);
1755 for_each_rx_queue(bp
, i
)
1756 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
, NUM_RX_SGE
);
1760 bp
->state
= BNX2X_STATE_CLOSED
;
1762 /* The last driver must disable a "close the gate" if there is no
1763 * parity attention or "process kill" pending.
1765 if ((!bnx2x_dec_load_cnt(bp
)) && (!bnx2x_chk_parity_attn(bp
)) &&
1766 bnx2x_reset_is_done(bp
))
1767 bnx2x_disable_close_the_gate(bp
);
1769 /* Reset MCP mail box sequence if there is on going recovery */
1770 if (unload_mode
== UNLOAD_RECOVERY
)
1776 int bnx2x_set_power_state(struct bnx2x
*bp
, pci_power_t state
)
1780 /* If there is no power capability, silently succeed */
1782 DP(NETIF_MSG_HW
, "No power capability. Breaking.\n");
1786 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
1790 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
1791 ((pmcsr
& ~PCI_PM_CTRL_STATE_MASK
) |
1792 PCI_PM_CTRL_PME_STATUS
));
1794 if (pmcsr
& PCI_PM_CTRL_STATE_MASK
)
1795 /* delay required during transition out of D3hot */
1800 /* If there are other clients above don't
1801 shut down the power */
1802 if (atomic_read(&bp
->pdev
->enable_cnt
) != 1)
1804 /* Don't shut down the power for emulation and FPGA */
1805 if (CHIP_REV_IS_SLOW(bp
))
1808 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
1812 pmcsr
|= PCI_PM_CTRL_PME_ENABLE
;
1814 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
1817 /* No more memory access after this point until
1818 * device is brought back to D0.
1829 * net_device service functions
1831 int bnx2x_poll(struct napi_struct
*napi
, int budget
)
1834 struct bnx2x_fastpath
*fp
= container_of(napi
, struct bnx2x_fastpath
,
1836 struct bnx2x
*bp
= fp
->bp
;
1839 #ifdef BNX2X_STOP_ON_ERROR
1840 if (unlikely(bp
->panic
)) {
1841 napi_complete(napi
);
1846 if (bnx2x_has_tx_work(fp
))
1849 if (bnx2x_has_rx_work(fp
)) {
1850 work_done
+= bnx2x_rx_int(fp
, budget
- work_done
);
1852 /* must not complete if we consumed full budget */
1853 if (work_done
>= budget
)
1857 /* Fall out from the NAPI loop if needed */
1858 if (!(bnx2x_has_rx_work(fp
) || bnx2x_has_tx_work(fp
))) {
1860 /* No need to update SB for FCoE L2 ring as long as
1861 * it's connected to the default SB and the SB
1862 * has been updated when NAPI was scheduled.
1864 if (IS_FCOE_FP(fp
)) {
1865 napi_complete(napi
);
1870 bnx2x_update_fpsb_idx(fp
);
1871 /* bnx2x_has_rx_work() reads the status block,
1872 * thus we need to ensure that status block indices
1873 * have been actually read (bnx2x_update_fpsb_idx)
1874 * prior to this check (bnx2x_has_rx_work) so that
1875 * we won't write the "newer" value of the status block
1876 * to IGU (if there was a DMA right after
1877 * bnx2x_has_rx_work and if there is no rmb, the memory
1878 * reading (bnx2x_update_fpsb_idx) may be postponed
1879 * to right before bnx2x_ack_sb). In this case there
1880 * will never be another interrupt until there is
1881 * another update of the status block, while there
1882 * is still unhandled work.
1886 if (!(bnx2x_has_rx_work(fp
) || bnx2x_has_tx_work(fp
))) {
1887 napi_complete(napi
);
1888 /* Re-enable interrupts */
1890 "Update index to %d\n", fp
->fp_hc_idx
);
1891 bnx2x_ack_sb(bp
, fp
->igu_sb_id
, USTORM_ID
,
1892 le16_to_cpu(fp
->fp_hc_idx
),
1902 /* we split the first BD into headers and data BDs
1903 * to ease the pain of our fellow microcode engineers
1904 * we use one mapping for both BDs
1905 * So far this has only been observed to happen
1906 * in Other Operating Systems(TM)
1908 static noinline u16
bnx2x_tx_split(struct bnx2x
*bp
,
1909 struct bnx2x_fastpath
*fp
,
1910 struct sw_tx_bd
*tx_buf
,
1911 struct eth_tx_start_bd
**tx_bd
, u16 hlen
,
1912 u16 bd_prod
, int nbd
)
1914 struct eth_tx_start_bd
*h_tx_bd
= *tx_bd
;
1915 struct eth_tx_bd
*d_tx_bd
;
1917 int old_len
= le16_to_cpu(h_tx_bd
->nbytes
);
1919 /* first fix first BD */
1920 h_tx_bd
->nbd
= cpu_to_le16(nbd
);
1921 h_tx_bd
->nbytes
= cpu_to_le16(hlen
);
1923 DP(NETIF_MSG_TX_QUEUED
, "TSO split header size is %d "
1924 "(%x:%x) nbd %d\n", h_tx_bd
->nbytes
, h_tx_bd
->addr_hi
,
1925 h_tx_bd
->addr_lo
, h_tx_bd
->nbd
);
1927 /* now get a new data BD
1928 * (after the pbd) and fill it */
1929 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
1930 d_tx_bd
= &fp
->tx_desc_ring
[bd_prod
].reg_bd
;
1932 mapping
= HILO_U64(le32_to_cpu(h_tx_bd
->addr_hi
),
1933 le32_to_cpu(h_tx_bd
->addr_lo
)) + hlen
;
1935 d_tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
1936 d_tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
1937 d_tx_bd
->nbytes
= cpu_to_le16(old_len
- hlen
);
1939 /* this marks the BD as one that has no individual mapping */
1940 tx_buf
->flags
|= BNX2X_TSO_SPLIT_BD
;
1942 DP(NETIF_MSG_TX_QUEUED
,
1943 "TSO split data size is %d (%x:%x)\n",
1944 d_tx_bd
->nbytes
, d_tx_bd
->addr_hi
, d_tx_bd
->addr_lo
);
1947 *tx_bd
= (struct eth_tx_start_bd
*)d_tx_bd
;
1952 static inline u16
bnx2x_csum_fix(unsigned char *t_header
, u16 csum
, s8 fix
)
1955 csum
= (u16
) ~csum_fold(csum_sub(csum
,
1956 csum_partial(t_header
- fix
, fix
, 0)));
1959 csum
= (u16
) ~csum_fold(csum_add(csum
,
1960 csum_partial(t_header
, -fix
, 0)));
1962 return swab16(csum
);
1965 static inline u32
bnx2x_xmit_type(struct bnx2x
*bp
, struct sk_buff
*skb
)
1969 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
1973 if (vlan_get_protocol(skb
) == htons(ETH_P_IPV6
)) {
1975 if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
1976 rc
|= XMIT_CSUM_TCP
;
1980 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
1981 rc
|= XMIT_CSUM_TCP
;
1985 if (skb_is_gso_v6(skb
))
1986 rc
|= XMIT_GSO_V6
| XMIT_CSUM_TCP
| XMIT_CSUM_V6
;
1987 else if (skb_is_gso(skb
))
1988 rc
|= XMIT_GSO_V4
| XMIT_CSUM_V4
| XMIT_CSUM_TCP
;
1993 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1994 /* check if packet requires linearization (packet is too fragmented)
1995 no need to check fragmentation if page size > 8K (there will be no
1996 violation to FW restrictions) */
1997 static int bnx2x_pkt_req_lin(struct bnx2x
*bp
, struct sk_buff
*skb
,
2002 int first_bd_sz
= 0;
2004 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2005 if (skb_shinfo(skb
)->nr_frags
>= (MAX_FETCH_BD
- 3)) {
2007 if (xmit_type
& XMIT_GSO
) {
2008 unsigned short lso_mss
= skb_shinfo(skb
)->gso_size
;
2009 /* Check if LSO packet needs to be copied:
2010 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2011 int wnd_size
= MAX_FETCH_BD
- 3;
2012 /* Number of windows to check */
2013 int num_wnds
= skb_shinfo(skb
)->nr_frags
- wnd_size
;
2018 /* Headers length */
2019 hlen
= (int)(skb_transport_header(skb
) - skb
->data
) +
2022 /* Amount of data (w/o headers) on linear part of SKB*/
2023 first_bd_sz
= skb_headlen(skb
) - hlen
;
2025 wnd_sum
= first_bd_sz
;
2027 /* Calculate the first sum - it's special */
2028 for (frag_idx
= 0; frag_idx
< wnd_size
- 1; frag_idx
++)
2030 skb_shinfo(skb
)->frags
[frag_idx
].size
;
2032 /* If there was data on linear skb data - check it */
2033 if (first_bd_sz
> 0) {
2034 if (unlikely(wnd_sum
< lso_mss
)) {
2039 wnd_sum
-= first_bd_sz
;
2042 /* Others are easier: run through the frag list and
2043 check all windows */
2044 for (wnd_idx
= 0; wnd_idx
<= num_wnds
; wnd_idx
++) {
2046 skb_shinfo(skb
)->frags
[wnd_idx
+ wnd_size
- 1].size
;
2048 if (unlikely(wnd_sum
< lso_mss
)) {
2053 skb_shinfo(skb
)->frags
[wnd_idx
].size
;
2056 /* in non-LSO too fragmented packet should always
2063 if (unlikely(to_copy
))
2064 DP(NETIF_MSG_TX_QUEUED
,
2065 "Linearization IS REQUIRED for %s packet. "
2066 "num_frags %d hlen %d first_bd_sz %d\n",
2067 (xmit_type
& XMIT_GSO
) ? "LSO" : "non-LSO",
2068 skb_shinfo(skb
)->nr_frags
, hlen
, first_bd_sz
);
2074 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff
*skb
, u32
*parsing_data
,
2077 *parsing_data
|= (skb_shinfo(skb
)->gso_size
<<
2078 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT
) &
2079 ETH_TX_PARSE_BD_E2_LSO_MSS
;
2080 if ((xmit_type
& XMIT_GSO_V6
) &&
2081 (ipv6_hdr(skb
)->nexthdr
== NEXTHDR_IPV6
))
2082 *parsing_data
|= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR
;
2086 * Update PBD in GSO case.
2089 * @param tx_start_bd
2093 static inline void bnx2x_set_pbd_gso(struct sk_buff
*skb
,
2094 struct eth_tx_parse_bd_e1x
*pbd
,
2097 pbd
->lso_mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
);
2098 pbd
->tcp_send_seq
= swab32(tcp_hdr(skb
)->seq
);
2099 pbd
->tcp_flags
= pbd_tcp_flags(skb
);
2101 if (xmit_type
& XMIT_GSO_V4
) {
2102 pbd
->ip_id
= swab16(ip_hdr(skb
)->id
);
2103 pbd
->tcp_pseudo_csum
=
2104 swab16(~csum_tcpudp_magic(ip_hdr(skb
)->saddr
,
2106 0, IPPROTO_TCP
, 0));
2109 pbd
->tcp_pseudo_csum
=
2110 swab16(~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
2111 &ipv6_hdr(skb
)->daddr
,
2112 0, IPPROTO_TCP
, 0));
2114 pbd
->global_data
|= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN
;
2120 * @param tx_start_bd
2124 * @return header len
2126 static inline u8
bnx2x_set_pbd_csum_e2(struct bnx2x
*bp
, struct sk_buff
*skb
,
2127 u32
*parsing_data
, u32 xmit_type
)
2130 ((((u8
*)skb_transport_header(skb
) - skb
->data
) >> 1) <<
2131 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT
) &
2132 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W
;
2134 if (xmit_type
& XMIT_CSUM_TCP
) {
2135 *parsing_data
|= ((tcp_hdrlen(skb
) / 4) <<
2136 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT
) &
2137 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW
;
2139 return skb_transport_header(skb
) + tcp_hdrlen(skb
) - skb
->data
;
2141 /* We support checksum offload for TCP and UDP only.
2142 * No need to pass the UDP header length - it's a constant.
2144 return skb_transport_header(skb
) +
2145 sizeof(struct udphdr
) - skb
->data
;
2151 * @param tx_start_bd
2155 * @return Header length
2157 static inline u8
bnx2x_set_pbd_csum(struct bnx2x
*bp
, struct sk_buff
*skb
,
2158 struct eth_tx_parse_bd_e1x
*pbd
,
2161 u8 hlen
= (skb_network_header(skb
) - skb
->data
) >> 1;
2163 /* for now NS flag is not used in Linux */
2165 (hlen
| ((skb
->protocol
== cpu_to_be16(ETH_P_8021Q
)) <<
2166 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT
));
2168 pbd
->ip_hlen_w
= (skb_transport_header(skb
) -
2169 skb_network_header(skb
)) >> 1;
2171 hlen
+= pbd
->ip_hlen_w
;
2173 /* We support checksum offload for TCP and UDP only */
2174 if (xmit_type
& XMIT_CSUM_TCP
)
2175 hlen
+= tcp_hdrlen(skb
) / 2;
2177 hlen
+= sizeof(struct udphdr
) / 2;
2179 pbd
->total_hlen_w
= cpu_to_le16(hlen
);
2182 if (xmit_type
& XMIT_CSUM_TCP
) {
2183 pbd
->tcp_pseudo_csum
= swab16(tcp_hdr(skb
)->check
);
2186 s8 fix
= SKB_CS_OFF(skb
); /* signed! */
2188 DP(NETIF_MSG_TX_QUEUED
,
2189 "hlen %d fix %d csum before fix %x\n",
2190 le16_to_cpu(pbd
->total_hlen_w
), fix
, SKB_CS(skb
));
2192 /* HW bug: fixup the CSUM */
2193 pbd
->tcp_pseudo_csum
=
2194 bnx2x_csum_fix(skb_transport_header(skb
),
2197 DP(NETIF_MSG_TX_QUEUED
, "csum after fix %x\n",
2198 pbd
->tcp_pseudo_csum
);
2204 /* called with netif_tx_lock
2205 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2206 * netif_wake_queue()
2208 netdev_tx_t
bnx2x_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
2210 struct bnx2x
*bp
= netdev_priv(dev
);
2211 struct bnx2x_fastpath
*fp
;
2212 struct netdev_queue
*txq
;
2213 struct sw_tx_bd
*tx_buf
;
2214 struct eth_tx_start_bd
*tx_start_bd
;
2215 struct eth_tx_bd
*tx_data_bd
, *total_pkt_bd
= NULL
;
2216 struct eth_tx_parse_bd_e1x
*pbd_e1x
= NULL
;
2217 struct eth_tx_parse_bd_e2
*pbd_e2
= NULL
;
2218 u32 pbd_e2_parsing_data
= 0;
2219 u16 pkt_prod
, bd_prod
;
2222 u32 xmit_type
= bnx2x_xmit_type(bp
, skb
);
2225 __le16 pkt_size
= 0;
2227 u8 mac_type
= UNICAST_ADDRESS
;
2229 #ifdef BNX2X_STOP_ON_ERROR
2230 if (unlikely(bp
->panic
))
2231 return NETDEV_TX_BUSY
;
2234 fp_index
= skb_get_queue_mapping(skb
);
2235 txq
= netdev_get_tx_queue(dev
, fp_index
);
2237 fp
= &bp
->fp
[fp_index
];
2239 if (unlikely(bnx2x_tx_avail(fp
) < (skb_shinfo(skb
)->nr_frags
+ 3))) {
2240 fp
->eth_q_stats
.driver_xoff
++;
2241 netif_tx_stop_queue(txq
);
2242 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2243 return NETDEV_TX_BUSY
;
2246 DP(NETIF_MSG_TX_QUEUED
, "queue[%d]: SKB: summed %x protocol %x "
2247 "protocol(%x,%x) gso type %x xmit_type %x\n",
2248 fp_index
, skb
->ip_summed
, skb
->protocol
, ipv6_hdr(skb
)->nexthdr
,
2249 ip_hdr(skb
)->protocol
, skb_shinfo(skb
)->gso_type
, xmit_type
);
2251 eth
= (struct ethhdr
*)skb
->data
;
2253 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2254 if (unlikely(is_multicast_ether_addr(eth
->h_dest
))) {
2255 if (is_broadcast_ether_addr(eth
->h_dest
))
2256 mac_type
= BROADCAST_ADDRESS
;
2258 mac_type
= MULTICAST_ADDRESS
;
2261 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2262 /* First, check if we need to linearize the skb (due to FW
2263 restrictions). No need to check fragmentation if page size > 8K
2264 (there will be no violation to FW restrictions) */
2265 if (bnx2x_pkt_req_lin(bp
, skb
, xmit_type
)) {
2266 /* Statistics of linearization */
2268 if (skb_linearize(skb
) != 0) {
2269 DP(NETIF_MSG_TX_QUEUED
, "SKB linearization failed - "
2270 "silently dropping this SKB\n");
2271 dev_kfree_skb_any(skb
);
2272 return NETDEV_TX_OK
;
2278 Please read carefully. First we use one BD which we mark as start,
2279 then we have a parsing info BD (used for TSO or xsum),
2280 and only then we have the rest of the TSO BDs.
2281 (don't forget to mark the last one as last,
2282 and to unmap only AFTER you write to the BD ...)
2283 And above all, all pdb sizes are in words - NOT DWORDS!
2286 pkt_prod
= fp
->tx_pkt_prod
++;
2287 bd_prod
= TX_BD(fp
->tx_bd_prod
);
2289 /* get a tx_buf and first BD */
2290 tx_buf
= &fp
->tx_buf_ring
[TX_BD(pkt_prod
)];
2291 tx_start_bd
= &fp
->tx_desc_ring
[bd_prod
].start_bd
;
2293 tx_start_bd
->bd_flags
.as_bitfield
= ETH_TX_BD_FLAGS_START_BD
;
2294 SET_FLAG(tx_start_bd
->general_data
, ETH_TX_START_BD_ETH_ADDR_TYPE
,
2298 SET_FLAG(tx_start_bd
->general_data
, ETH_TX_START_BD_HDR_NBDS
, 1);
2300 /* remember the first BD of the packet */
2301 tx_buf
->first_bd
= fp
->tx_bd_prod
;
2305 DP(NETIF_MSG_TX_QUEUED
,
2306 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2307 pkt_prod
, tx_buf
, fp
->tx_pkt_prod
, bd_prod
, tx_start_bd
);
2309 if (vlan_tx_tag_present(skb
)) {
2310 tx_start_bd
->vlan_or_ethertype
=
2311 cpu_to_le16(vlan_tx_tag_get(skb
));
2312 tx_start_bd
->bd_flags
.as_bitfield
|=
2313 (X_ETH_OUTBAND_VLAN
<< ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT
);
2315 tx_start_bd
->vlan_or_ethertype
= cpu_to_le16(pkt_prod
);
2317 /* turn on parsing and get a BD */
2318 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
2320 if (xmit_type
& XMIT_CSUM
) {
2321 tx_start_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_L4_CSUM
;
2323 if (xmit_type
& XMIT_CSUM_V4
)
2324 tx_start_bd
->bd_flags
.as_bitfield
|=
2325 ETH_TX_BD_FLAGS_IP_CSUM
;
2327 tx_start_bd
->bd_flags
.as_bitfield
|=
2328 ETH_TX_BD_FLAGS_IPV6
;
2330 if (!(xmit_type
& XMIT_CSUM_TCP
))
2331 tx_start_bd
->bd_flags
.as_bitfield
|=
2332 ETH_TX_BD_FLAGS_IS_UDP
;
2335 if (CHIP_IS_E2(bp
)) {
2336 pbd_e2
= &fp
->tx_desc_ring
[bd_prod
].parse_bd_e2
;
2337 memset(pbd_e2
, 0, sizeof(struct eth_tx_parse_bd_e2
));
2338 /* Set PBD in checksum offload case */
2339 if (xmit_type
& XMIT_CSUM
)
2340 hlen
= bnx2x_set_pbd_csum_e2(bp
, skb
,
2341 &pbd_e2_parsing_data
,
2344 pbd_e1x
= &fp
->tx_desc_ring
[bd_prod
].parse_bd_e1x
;
2345 memset(pbd_e1x
, 0, sizeof(struct eth_tx_parse_bd_e1x
));
2346 /* Set PBD in checksum offload case */
2347 if (xmit_type
& XMIT_CSUM
)
2348 hlen
= bnx2x_set_pbd_csum(bp
, skb
, pbd_e1x
, xmit_type
);
2352 /* Map skb linear data for DMA */
2353 mapping
= dma_map_single(&bp
->pdev
->dev
, skb
->data
,
2354 skb_headlen(skb
), DMA_TO_DEVICE
);
2356 /* Setup the data pointer of the first BD of the packet */
2357 tx_start_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
2358 tx_start_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
2359 nbd
= skb_shinfo(skb
)->nr_frags
+ 2; /* start_bd + pbd + frags */
2360 tx_start_bd
->nbd
= cpu_to_le16(nbd
);
2361 tx_start_bd
->nbytes
= cpu_to_le16(skb_headlen(skb
));
2362 pkt_size
= tx_start_bd
->nbytes
;
2364 DP(NETIF_MSG_TX_QUEUED
, "first bd @%p addr (%x:%x) nbd %d"
2365 " nbytes %d flags %x vlan %x\n",
2366 tx_start_bd
, tx_start_bd
->addr_hi
, tx_start_bd
->addr_lo
,
2367 le16_to_cpu(tx_start_bd
->nbd
), le16_to_cpu(tx_start_bd
->nbytes
),
2368 tx_start_bd
->bd_flags
.as_bitfield
,
2369 le16_to_cpu(tx_start_bd
->vlan_or_ethertype
));
	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		if (CHIP_IS_E2(bp))
			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
					     xmit_type);
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}
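	/* when the linear part extends past the headers, bnx2x_tx_split()
	 * above moves the headers into their own BD (BNX2X_TSO_SPLIT_BD) and
	 * nbd is incremented to account for it
	 */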
	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}
	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;
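	/* total_pkt_bd is the first regular data BD (if any) and is the only
	 * BD that carries the total packet length; the nbd bump above covers
	 * the next-page BD when the chain wraps onto a new BD page
	 */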
	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, fp->cid, fp->tx_db.raw);
	fp->tx_bd_prod += nbd;
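	/* stop the queue while fewer than MAX_SKB_FRAGS + 3 BDs are free: a
	 * worst-case packet needs one BD per fragment plus the start, parsing
	 * and next-page BDs; the queue is re-checked right after stopping to
	 * avoid racing with a concurrent completion in bnx2x_tx_int()
	 */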
	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->tx_bd_cons
		 */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
}
/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2x_set_eth_mac(bp, 1);

	return 0;
}
static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];

	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
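		/* no status block is allocated for the FCoE queue in
		 * bnx2x_alloc_fp_mem_at(), so it is only zeroed here rather
		 * than DMA-freed
		 */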
	} else {
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
			       bnx2x_fp(bp, fp_index, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */
}
void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}
static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);

	if (CHIP_IS_E2(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}
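/* set_sb_shortcuts() caches pointers into the chip-specific status block
 * layout, so the fastpath code can read the index values without checking
 * whether the device is an E1x or an E2.
 */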
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;

	/* if rx_ring_size specified - use it */
	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
			   MAX_RX_AVAIL/bp->num_queues;

	/* allocate at least number of buffers required by FW */
	rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
						    MIN_RX_SIZE_TPA,
			     rx_ring_size);
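	/* rx_ring_size is an upper bound per queue (either the user-supplied
	 * value or an equal share of MAX_RX_AVAIL); the max_t() above only
	 * enforces the FW minimum, which differs between TPA and non-TPA
	 * queues
	 */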
	bnx2x_fp(bp, index, bp) = bp;
	bnx2x_fp(bp, index, index) = index;

	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, index, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));
	}
	set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
				&bnx2x_fp(bp, index, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
				&bnx2x_fp(bp, index, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
				&bnx2x_fp(bp, index, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
				&bnx2x_fp(bp, index, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);

		bnx2x_set_next_page_rx_bd(fp);

		bnx2x_set_next_page_rx_cq(fp);

		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if queue is not big enough,
	 * In these cases we disable the queue
	 * Min size different for TPA and non-TPA queues
	 */
	if (ring_size < (fp->disable_tpa ?
				MIN_RX_SIZE_TPA : MIN_RX_SIZE_NONTPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}
int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/*
	 * 1. Allocate FP for leading - fatal if error
	 * 2. {CNIC} Allocate FCoE FP - fatal if error
	 * 3. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* FCoE */
	if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		/*
		 * move non eth FPs next to last eth FP
		 * must be done in that order
		 * FCOE_IDX < FWD_IDX < OOO_IDX
		 */
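		/* delta eth queues could not be allocated, so every remaining
		 * non-eth fastpath is shifted down by delta to stay adjacent
		 * to the last eth queue that did get memory
		 */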
		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);

		bp->num_queues -= delta;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
	}

	return 0;
}
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	kfree(bp->fp);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;

	/* fp array */
	fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	bp->fp = fp;

	/* msix table */
	tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
		      GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;

alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
static int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}
u32 bnx2x_fix_features(struct net_device *dev, u32 features)
{
	struct bnx2x *bp = netdev_priv(dev);
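	/* in this driver LRO is implemented on top of the HW TPA aggregation,
	 * so the same restrictions apply to both
	 */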
	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
		features &= ~NETIF_F_LRO;

	return features;
}
int bnx2x_set_features(struct net_device *dev, u32 features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (flags ^ bp->flags) {
		bp->flags = flags;

		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	/* Since the chip was reset, clear the FW sequence number */
	bp->fw_seq = 0;
	rc = bnx2x_nic_load(bp, LOAD_OPEN);