/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}
static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}
static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (is_kdump_kernel())
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}
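/* Illustrative note (not from the original source): with the
 * bnx2x_num_queues module parameter left at 0 on an 8-CPU host, nq starts
 * at netif_get_num_default_rss_queues() - typically min(8, nr_cpus) - and
 * is then clamped to the [1, BNX2X_MAX_QUEUES(bp)] range.
 */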
/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
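/* Illustrative note (not from the original source): per the rule in the
 * comment above, if the FCoE fastpath moves down by a delta of 2 ETH
 * queues, its txdata slot in bp->bnx2x_txq shifts down by 2 * max_cos
 * entries, keeping the FCOE_TXQ_IDX_OFFSET position within the new layout.
 */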
/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}
/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}
int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
		/* Skip second parse bd... */
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
				    RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
				  le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
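/* Note on the bit-vector bookkeeping above (added for clarity): each
 * sge_mask element tracks BIT_VEC64_ELEM_SZ (64) SGEs, so the producer
 * only advances in whole 64-SGE chunks; the loop stops at the first
 * element with any bit still set, i.e. a chunk whose pages have not all
 * been consumed yet.
 */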
/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
	   fp->tpa_queue_used);
#endif
}
/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
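/* Breakdown of the 12 bytes (added for clarity; the standard TCP
 * timestamp option with two leading NOPs for alignment):
 * nop(1) + nop(1) + kind(1) + length(1) + echo(4) + val(4) = 12.
 */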
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:			packet skb
 * @parsing_flags:		parsing flags from the START CQE
 * @len_on_bd:			total length of the first packet for the
 *				aggregation.
 * @pkt_len:			length of all segments
 * @num_of_coalesced_segs:	count of coalesced segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp, if there is it will
	 * always be 12 bytes length: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
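/* Illustrative arithmetic (not from the original source): for an IPv4
 * aggregation whose first frame carries len_on_bd == 1514 bytes and no
 * timestamp option, hdrs_len = 14 (ETH_HLEN) + 20 (iphdr) + 20 (tcphdr)
 * = 54, so gso_size = 1514 - 54 = 1460 - the usual Ethernet TCP MSS.
 */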
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}
static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfp_mask & __GFP_WAIT))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}
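/* Note (added for clarity): rx_frag_size == 0 means the receive buffer
 * plus NET_SKB_PAD would not fit in a single page - see
 * bnx2x_set_rx_buf_size() - so a plain kmalloc() buffer is used instead
 * of a page fragment.
 */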
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}
static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}
static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x *, struct sk_buff *))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
				struct bnx2x_fastpath *fp,
				struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
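/* Note (added for clarity): the caller runs skb_checksum_none_assert()
 * before invoking this helper, so on a bad-checksum CQE the skb simply
 * stays at CHECKSUM_NONE and the stack re-verifies the checksum in
 * software; only clean packets are promoted to CHECKSUM_UNNECESSARY.
 */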
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA.  PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data. Without the barrier TPA state-machine might
		 * enter inconsistent state and kernel stack might be
		 * provided with incorrect packet description - these lead
		 * to various kernel crashes.
		 */
		rmb();

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = napi_alloc_skb(&fp->napi, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a none-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		/* Check if this packet was timestamped */
		if (unlikely(cqe->fast_path_cqe.type_error_flags &
			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
			bnx2x_set_rx_ts(bp, skb);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		skb_mark_napi_id(skb, &fp->napi);

		if (bnx2x_fp_ll_polling(fp))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
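/* Illustrative arithmetic (not from the original source): with a
 * 10000 Mbps link and maxCfg == 30, SI mode treats maxCfg as a percentage
 * and reports 10000 * 30 / 100 = 3000 Mbps, while SD mode treats maxCfg
 * as a rate in 100 Mbps units and caps the speed at 30 * 100 = 3000 Mbps.
 */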
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	memset(data, 0, sizeof(*data));

	if (IS_PF(bp)) {
		/* Fill the report data: effective line speed */
		data->line_speed = bnx2x_get_mf_speed(bp);

		/* Link is down */
		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		if (!BNX2X_NUM_ETH_QUEUES(bp))
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &data->link_report_flags);

		/* Full DUPLEX */
		if (bp->link_vars.duplex == DUPLEX_FULL)
			__set_bit(BNX2X_LINK_REPORT_FD,
				  &data->link_report_flags);

		/* Rx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				  &data->link_report_flags);

		/* Tx Flow Control is ON */
		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				  &data->link_report_flags);
	} else { /* VF */
		*data = bp->vf_link_vars;
	}
}
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */
void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	/* propagate status to VFs */
	if (IS_PF(bp))
		bnx2x_iov_link_update(bp);

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}

		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}
static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					   BCM_PAGE_SIZE * (i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					   BCM_PAGE_SIZE * (i % NUM_RX_SGE_PAGES)));
	}
}
static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data =
					bnx2x_frag_alloc(fp, GFP_KERNEL);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
						       GFP_KERNEL) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}
static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}
static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}
static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}
static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}
static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}
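/* Illustrative note (not from the original source): bnx2x_extract_max_cfg()
 * reads the same MAX_BW field back, so e.g. value == 30 stores 30 in the
 * field, which bnx2x_get_mf_speed() above would interpret as 30 * 100 =
 * 3000 Mbps in SD mode.
 */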
/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}
void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc;

	/* VFs don't have a default status block */
	if (IS_PF(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
			       bp->msix_table[0].entry);
		msix_vec++;
	}

	/* Cnic requires an msix vector for itself */
	if (CNIC_SUPPORT(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
			       msix_vec, bp->msix_table[msix_vec].entry);
		msix_vec++;
	}

	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
	   msix_vec);

	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc == -ENOSPC) {
		/* Get by with single vector */
		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
		if (rc < 0) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

		BNX2X_DEV_INFO("set number of queues to 1\n");
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
		goto no_msix;
	} else if (rc < msix_vec) {
		/* how many fewer vectors will we have? */
		int diff = msix_vec - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	/* no default status block for vf */
	if (IS_PF(bp)) {
		rc = request_irq(bp->msix_table[offset++].vector,
				 bnx2x_msix_sp_int, 0,
				 bp->dev->name, bp->dev);
		if (rc) {
			BNX2X_ERR("request sp irq failed\n");
			return -EBUSY;
		}
	}

	if (CNIC_SUPPORT(bp))
		offset++;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
				  bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	if (IS_PF(bp)) {
		offset = 1 + CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
			    bp->msix_table[0].vector,
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	} else {
		offset = CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	}

	return 0;
}
int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		BNX2X_DEV_INFO("MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}
static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	unsigned int irq;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;
	else
		flags = IRQF_SHARED;

	if (bp->flags & USING_MSIX_FLAG)
		irq = bp->msix_table[0].vector;
	else
		irq = bp->pdev->irq;

	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;

	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
		if (bp->flags & USING_MSIX_FLAG) {
			bp->dev->irq = bp->msix_table[0].vector;
			netdev_info(bp->dev, "using MSIX IRQ %d\n",
				    bp->dev->irq);
		}
	}

	return 0;
}
static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		bnx2x_fp_busy_poll_init(&bp->fp[i]);
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_fp_busy_poll_init(&bp->fp[i]);
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
			usleep_range(1000, 2000);
	}
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
			usleep_range(1000, 2000);
	}
}
void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		if (CNIC_LOADED(bp))
			bnx2x_napi_enable_cnic(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (CNIC_LOADED(bp))
		bnx2x_napi_disable_cnic(bp);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe_tx(bp, txq_index);
	}

	/* select a non-FCoE queue */
	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
{
	/* RSS queues */
	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);

	/* override in STORAGE SD modes */
	if (IS_MF_STORAGE_ONLY(bp))
		bp->num_ethernet_queues = 1;

	/* Add special queues */
	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}
/**
 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
 *
 * @bp:			Driver handle
 * @include_cnic:	whether to account for the FCoE L2 queue
 *
 * We currently support at most 16 Tx queues for each CoS, thus we will
 * allocate a multiple of 16 for ETH L2 rings according to the value of the
 * bp->max_cos.
 *
 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
 * index after all ETH L2 indices.
 *
 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 * 16..31,...) with indices that are not coupled with any real Tx queue.
 *
 * The proper configuration of skb->queue_mapping is handled by
 * bnx2x_select_queue() and __skb_tx_hash().
 *
 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
 */
static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
{
	int rc, tx, rx;

	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
	rx = BNX2X_NUM_ETH_QUEUES(bp);

	/* account for fcoe queue */
	if (include_cnic && !NO_FCOE(bp)) {
		rx++;
		tx++;
	}

	rc = netif_set_real_num_tx_queues(bp->dev, tx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
		return rc;
	}
	rc = netif_set_real_num_rx_queues(bp->dev, rx);
	if (rc) {
		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
		return rc;
	}

	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
	   tx, rx);

	return rc;
}
static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u32 mtu;

		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
		if (IS_FCOE_IDX(i))
			/*
			 * Although there are no IP frames expected to arrive to
			 * this ring we still want to add an
			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
			 * overrun
			 */
			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
		else
			mtu = bp->dev->mtu;
		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
				  IP_HEADER_ALIGNMENT_PADDING +
				  ETH_OVERHEAD +
				  mtu +
				  BNX2X_FW_RX_ALIGN_END;
		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
		else
			fp->rx_frag_size = 0;
	}
}
static int bnx2x_init_rss(struct bnx2x *bp)
{
	int i;
	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);

	/* Prepare the initial contents for the indirection table if RSS is
	 * enabled
	 */
	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
		bp->rss_conf_obj.ind_table[i] =
			bp->fp->cl_id +
			ethtool_rxfh_indir_default(i, num_eth_queues);

	/*
	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
	 * per-port, so if explicit configuration is needed, do it only
	 * for a PMF.
	 *
	 * For 57712 and newer on the other hand it's a per-function
	 * configuration.
	 */
	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}
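/* Illustrative note (not from the original source):
 * ethtool_rxfh_indir_default() spreads entries round-robin
 * (i % num_eth_queues), so with 4 ETH queues the indirection table above
 * cycles through cl_id+0, cl_id+1, cl_id+2, cl_id+3, ...
 */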
int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
	      bool config_hash, bool enable)
{
	struct bnx2x_config_rss_params params = {NULL};

	/* Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (!is_eth_multi(bp))
	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
	 */

	params.rss_obj = rss_obj;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	if (enable) {
		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);

		/* RSS configuration */
		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
		if (rss_obj->udp_rss_v4)
			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
		if (rss_obj->udp_rss_v6)
			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);

		if (!CHIP_IS_E1x(bp))
			/* valid only for TUNN_MODE_GRE tunnel mode */
			__set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
	} else {
		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
	}

	/* Hash bits */
	params.rss_result_mask = MULTI_MASK;

	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));

	if (config_hash) {
		/* RSS keys */
		netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
	}

	if (IS_PF(bp))
		return bnx2x_config_rss(bp, &params);
	else
		return bnx2x_vfpf_config_rss(bp, &params);
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	struct bnx2x_func_state_params func_params = {NULL};

	/* Prepare parameters for function state transitions */
	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_HW_INIT;

	func_params.params.hw_init.load_phase = load_code;

	return bnx2x_func_state_change(bp, &func_params);
}
/*
 * Cleans the objects that have internal lists without sending
 * ramrods. Should be run when interrupts are disabled.
 */
void bnx2x_squeeze_objects(struct bnx2x *bp)
{
	int rc;
	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;

	/***************** Cleanup MACs' object first *************************/

	/* Wait for completion of requested */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	/* Perform a dry cleanup */
	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);

	/* Clean ETH primary MAC */
	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

	/* Cleanup UC list */
	vlan_mac_flags = 0;
	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
				 &ramrod_flags);
	if (rc != 0)
		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);

	/***************** Now clean mcast object *****************************/
	rparam.mcast_obj = &bp->mcast_obj;
	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);

	/* Add a DEL command... - Since we're doing a driver cleanup only,
	 * we take a lock surrounding both the initial send and the CONTs,
	 * as we don't want a true completion to disrupt us in the middle.
	 */
	netif_addr_lock_bh(bp->dev);
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
			  rc);

	/* ...and wait until all pending commands are cleared */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	while (rc != 0) {
		if (rc < 0) {
			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
				  rc);
			netif_addr_unlock_bh(bp->dev);
			return;
		}

		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
	}
	netif_addr_unlock_bh(bp->dev);
}
#ifndef BNX2X_STOP_ON_ERROR
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		goto label; \
	} while (0)

#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { \
		bp->cnic_loaded = false; \
		goto label; \
	} while (0)
#else /*BNX2X_STOP_ON_ERROR*/
#define LOAD_ERROR_EXIT(bp, label) \
	do { \
		(bp)->state = BNX2X_STATE_ERROR; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#define LOAD_ERROR_EXIT_CNIC(bp, label) \
	do { \
		bp->cnic_loaded = false; \
		(bp)->panic = 1; \
		return -EBUSY; \
	} while (0)
#endif /*BNX2X_STOP_ON_ERROR*/
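/* Note (added for clarity): these macros serve the nic-load path below -
 * on error they either record the error state and jump to the caller's
 * unwind label, or, when built with BNX2X_STOP_ON_ERROR, set the panic
 * flag and bail out immediately.
 */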
static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
{
	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
}
static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
	int num_groups, vf_headroom = 0;
	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;

	/* number of queues for statistics is number of eth queues + FCoE */
	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;

	/* Total number of FW statistics requests =
	 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
	 * and fcoe l2 queue) stats + num of queues (which includes another 1
	 * for fcoe l2 queue if applicable)
	 */
	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;

	/* vf stats appear in the request list, but their data is allocated by
	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
	 * it is used to determine where to place the vf stats queries in the
	 * request struct
	 */
	if (IS_SRIOV(bp))
		vf_headroom = bnx2x_vf_headroom(bp);

	/* Request is built from stats_query_header and an array of
	 * stats_query_cmd_group each of which contains
	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
	 * configured in the stats_query_header.
	 */
	num_groups =
		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
		 1 : 0));

	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
	   bp->fw_stats_num, vf_headroom, num_groups);
	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
		num_groups * sizeof(struct stats_query_cmd_group);

	/* Data for statistics requests + stats_counter
	 * stats_counter holds per-STORM counters that are incremented
	 * when STORM has finished with the current request.
	 * memory for FCoE offloaded statistics are counted anyway,
	 * even if they will not be sent.
	 * VF stats are not accounted for here as the data of VF stats is stored
	 * in memory allocated by the VF, not here.
	 */
	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
		sizeof(struct per_pf_stats) +
		sizeof(struct fcoe_statistics_params) +
		sizeof(struct per_queue_stats) * num_queue_stats +
		sizeof(struct stats_counter);

	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
				       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
	if (!bp->fw_stats)
		goto alloc_mem_err;

	/* Set shortcuts */
	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
		bp->fw_stats_req_sz;

	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
	   U64_HI(bp->fw_stats_req_mapping),
	   U64_LO(bp->fw_stats_req_mapping));
	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
	   U64_HI(bp->fw_stats_data_mapping),
	   U64_LO(bp->fw_stats_data_mapping));
	return 0;

alloc_mem_err:
	bnx2x_free_fw_stats_mem(bp);
	BNX2X_ERR("Can't allocate FW stats memory\n");
	return -ENOMEM;
}

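/* Worked example (illustrative only; the concrete value of
 * STATS_QUERY_CMD_COUNT comes from the FW HSI headers): the num_groups
 * expression above is a ceiling division. Assuming STATS_QUERY_CMD_COUNT == 16
 * and fw_stats_num + vf_headroom == 18:
 *
 *	18 / 16 = 1,  18 % 16 = 2 (non-zero, so add 1)  =>  num_groups = 2
 *
 * i.e. two stats_query_cmd_group entries are needed to hold 18 query rules.
 */
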
/* send load request to mcp and analyze response */
static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
{
	u32 param;

	/* init fw_seq */
	bp->fw_seq =
		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		 DRV_MSG_SEQ_NUMBER_MASK);
	BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);

	/* Get current FW pulse sequence */
	bp->fw_drv_pulse_wr_seq =
		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
		 DRV_PULSE_SEQ_MASK);
	BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);

	param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;

	if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
		param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;

	/* load request */
	(*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);

	/* if mcp fails to respond we must abort */
	if (!(*load_code)) {
		BNX2X_ERR("MCP response failure, aborting\n");
		return -EBUSY;
	}

	/* If mcp refused (e.g. other port is in diagnostic mode) we
	 * must abort
	 */
	if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
		BNX2X_ERR("MCP refused load request, aborting\n");
		return -EBUSY;
	}
	return 0;
}

/* check whether another PF has already loaded FW to chip. In
 * virtualized environments a pf from another VM may have already
 * initialized the device including loading FW
 */
int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
{
	/* is another pf loaded on this engine? */
	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
	    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
		/* build my FW version dword */
		u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
			(BCM_5710_FW_MINOR_VERSION << 8) +
			(BCM_5710_FW_REVISION_VERSION << 16) +
			(BCM_5710_FW_ENGINEERING_VERSION << 24);

		/* read loaded FW from chip */
		u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);

		DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
		   loaded_fw, my_fw);

		/* abort nic load if version mismatch */
		if (my_fw != loaded_fw) {
			if (print_err)
				BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
					  loaded_fw, my_fw);
			else
				BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
					       loaded_fw, my_fw);
			return -EBUSY;
		}
	}
	return 0;
}

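/* Illustrative example of the my_fw dword layout built above (the version
 * numbers are hypothetical): major lives in bits 0-7, minor in bits 8-15,
 * revision in bits 16-23 and engineering in bits 24-31. For a 7.13.1.0 FW:
 *
 *	my_fw = 7 + (13 << 8) + (1 << 16) + (0 << 24) = 0x00010d07
 *
 * The same packing is expected at XSEM_REG_PRAM, so a plain u32 compare is
 * enough to detect a mismatched FW already loaded by another function.
 */
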
/* returns the "mcp load_code" according to global load_count array */
static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
{
	int path = BP_PATH(bp);

	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
	   bnx2x_load_count[path][2]);
	bnx2x_load_count[path][0]++;
	bnx2x_load_count[path][1 + port]++;
	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
	   bnx2x_load_count[path][2]);
	if (bnx2x_load_count[path][0] == 1)
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	else if (bnx2x_load_count[path][1 + port] == 1)
		return FW_MSG_CODE_DRV_LOAD_PORT;
	else
		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}

/* mark PMF if applicable */
static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
{
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
		bp->port.pmf = 1;
		/* We need the barrier to ensure the ordering between the
		 * writing to bp->port.pmf here and reading it from the
		 * bnx2x_periodic_task().
		 */
		smp_mb();
	} else {
		bp->port.pmf = 0;
	}

	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
}

static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
{
	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base)) {
		if (SHMEM2_HAS(bp, dcc_support))
			SHMEM2_WR(bp, dcc_support,
				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
		if (SHMEM2_HAS(bp, afex_driver_support))
			SHMEM2_WR(bp, afex_driver_support,
				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
	}

	/* Set AFEX default VLAN tag to an invalid value */
	bp->afex_def_vlan_tag = -1;
}

/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of the bp->fp[index].napi are kept
 * intact.
 */
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int cos;
	struct napi_struct orig_napi = fp->napi;
	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;

	/* bzero bnx2x_fastpath contents */
	if (fp->tpa_info)
		memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
		       sizeof(struct bnx2x_agg_info));
	memset(fp, 0, sizeof(*fp));

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;
	fp->tpa_info = orig_tpa_info;
	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/* Init txdata pointers */
	if (IS_FCOE_FP(fp))
		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
	if (IS_ETH_FP(fp))
		for_each_cos_in_tx_queue(fp, cos)
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
				BNX2X_NUM_ETH_QUEUES(bp) + index];

	/* set the tpa flag for each queue. The tpa flag determines the queue
	 * minimal size so it must be set prior to queue memory allocation
	 */
	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
			    (bp->flags & GRO_ENABLE_FLAG &&
			     bnx2x_mtu_allows_gro(bp->dev->mtu)));
	if (bp->flags & TPA_ENABLE_FLAG)
		fp->mode = TPA_MODE_LRO;
	else if (bp->flags & GRO_ENABLE_FLAG)
		fp->mode = TPA_MODE_GRO;

	/* We don't want TPA if it's disabled in bp
	 * or if this is an FCoE L2 ring.
	 */
	if (bp->disable_tpa || IS_FCOE_FP(fp))
		fp->disable_tpa = 1;
}

int bnx2x_load_cnic(struct bnx2x *bp)
{
	int i, rc, port = BP_PORT(bp);

	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");

	mutex_init(&bp->cnic_mutex);

	if (IS_PF(bp)) {
		rc = bnx2x_alloc_mem_cnic(bp);
		if (rc) {
			BNX2X_ERR("Unable to allocate bp memory for cnic\n");
			LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
		}
	}

	rc = bnx2x_alloc_fp_mem_cnic(bp);
	if (rc) {
		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
	}

	/* Update the number of queues with the cnic queues */
	rc = bnx2x_set_real_num_queues(bp, 1);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
	}

	/* Add all CNIC NAPI objects */
	bnx2x_add_all_napi_cnic(bp);
	DP(NETIF_MSG_IFUP, "cnic napi added\n");
	bnx2x_napi_enable_cnic(bp);

	rc = bnx2x_init_hw_func_cnic(bp);
	if (rc)
		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);

	bnx2x_nic_init_cnic(bp);

	if (IS_PF(bp)) {
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);

		/* setup cnic queues */
		for_each_cnic_queue(bp, i) {
			rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
			if (rc) {
				BNX2X_ERR("Queue setup failed\n");
				LOAD_ERROR_EXIT(bp, load_error_cnic2);
			}
		}
	}

	/* Initialize Rx filter. */
	bnx2x_set_rx_mode_inner(bp);

	/* re-read iscsi info */
	bnx2x_get_iscsi_info(bp);
	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_setup_cnic_info(bp);
	bp->cnic_loaded = true;
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);

	DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");

	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error_cnic2:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);

load_error_cnic1:
	bnx2x_napi_disable_cnic(bp);
	/* Update the number of queues without the cnic queues */
	if (bnx2x_set_real_num_queues(bp, 0))
		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
load_error_cnic0:
	BNX2X_ERR("CNIC-related load failed\n");
	bnx2x_free_fp_mem_cnic(bp);
	bnx2x_free_mem_cnic(bp);
	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}

/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	int port = BP_PORT(bp);
	int i, rc = 0, load_code = 0;

	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
	DP(NETIF_MSG_IFUP,
	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't load NIC when there is panic\n");
		return -EPERM;
	}
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* zero the structure w/o any lock, before SP handler is initialized */
	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		  &bp->last_reported_link.link_report_flags);

	if (IS_PF(bp))
		/* must be called before memory allocation and HW init */
		bnx2x_ilt_set_info(bp);

	/*
	 * Zero fastpath structures preserving invariants like napi, which are
	 * allocated only once, fp index, max_cos, bp pointer.
	 * Also set fp->disable_tpa and txdata_ptr.
	 */
	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
	for_each_queue(bp, i)
		bnx2x_bz_fp(bp, i);
	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
				  bp->num_cnic_queues) *
				  sizeof(struct bnx2x_fp_txdata));

	bp->fcoe_init = false;

	/* Set the receive queues buffer size */
	bnx2x_set_rx_buf_size(bp);

	if (IS_PF(bp)) {
		rc = bnx2x_alloc_mem(bp);
		if (rc) {
			BNX2X_ERR("Unable to allocate bp memory\n");
			return rc;
		}
	}

	/* need to be done after alloc mem, since it's self adjusting to amount
	 * of memory available for RSS queues
	 */
	rc = bnx2x_alloc_fp_mem(bp);
	if (rc) {
		BNX2X_ERR("Unable to allocate memory for fps\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* Allocate memory for FW statistics */
	if (bnx2x_alloc_fw_stats_mem(bp))
		LOAD_ERROR_EXIT(bp, load_error0);

	/* request pf to initialize status blocks */
	if (IS_VF(bp)) {
		rc = bnx2x_vfpf_init(bp);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* As long as bnx2x_alloc_mem() may possibly update
	 * bp->num_queues, bnx2x_set_real_num_queues() should always
	 * come after it. At this stage cnic queues are not counted.
	 */
	rc = bnx2x_set_real_num_queues(bp, 0);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		LOAD_ERROR_EXIT(bp, load_error0);
	}

	/* configure multi cos mappings in kernel.
	 * this configuration may be overridden by a multi class queue
	 * discipline or by a dcbx negotiation result.
	 */
	bnx2x_setup_tc(bp->dev, bp->max_cos);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);
	DP(NETIF_MSG_IFUP, "napi added\n");
	bnx2x_napi_enable(bp);

	if (IS_PF(bp)) {
		/* set pf load just before approaching the MCP */
		bnx2x_set_pf_load(bp);

		/* if mcp exists send load request and analyze response */
		if (!BP_NOMCP(bp)) {
			/* attempt to load pf */
			rc = bnx2x_nic_load_request(bp, &load_code);
			if (rc)
				LOAD_ERROR_EXIT(bp, load_error1);

			/* what did mcp say? */
			rc = bnx2x_compare_fw_ver(bp, load_code, true);
			if (rc) {
				bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
				LOAD_ERROR_EXIT(bp, load_error2);
			}
		} else {
			load_code = bnx2x_nic_load_no_mcp(bp, port);
		}

		/* mark pmf if applicable */
		bnx2x_nic_load_pmf(bp, load_code);

		/* Init Function state controlling object */
		bnx2x__init_func_obj(bp);

		/* Initialize HW */
		rc = bnx2x_init_hw(bp, load_code);
		if (rc) {
			BNX2X_ERR("HW init failed, aborting\n");
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
			LOAD_ERROR_EXIT(bp, load_error2);
		}
	}

	bnx2x_pre_irq_nic_init(bp);

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		BNX2X_ERR("setup irqs failed\n");
		if (IS_PF(bp))
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		LOAD_ERROR_EXIT(bp, load_error2);
	}

	/* Init per-function objects */
	if (IS_PF(bp)) {
		/* Setup NIC internals and enable interrupts */
		bnx2x_post_irq_nic_init(bp, load_code);

		bnx2x_init_bp_objs(bp);
		bnx2x_iov_nic_init(bp);

		/* Set AFEX default VLAN tag to an invalid value */
		bp->afex_def_vlan_tag = -1;
		bnx2x_nic_load_afex_dcc(bp, load_code);
		bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
		rc = bnx2x_func_start(bp);
		if (rc) {
			BNX2X_ERR("Function start failed!\n");
			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);

			LOAD_ERROR_EXIT(bp, load_error3);
		}

		/* Send LOAD_DONE command to MCP */
		if (!BP_NOMCP(bp)) {
			load_code = bnx2x_fw_command(bp,
						     DRV_MSG_CODE_LOAD_DONE, 0);
			if (!load_code) {
				BNX2X_ERR("MCP response failure, aborting\n");
				rc = -EBUSY;
				LOAD_ERROR_EXIT(bp, load_error3);
			}
		}

		/* initialize FW coalescing state machines in RAM */
		bnx2x_update_coalesce(bp);
	}

	/* setup the leading queue */
	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* set up the rest of the queues */
	for_each_nondefault_eth_queue(bp, i) {
		if (IS_PF(bp))
			rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
		else /* VF */
			rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
		if (rc) {
			BNX2X_ERR("Queue %d setup failed\n", i);
			LOAD_ERROR_EXIT(bp, load_error3);
		}
	}

	/* setup rss */
	rc = bnx2x_init_rss(bp);
	if (rc) {
		BNX2X_ERR("PF RSS init failed\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

	/* Configure a ucast MAC */
	if (IS_PF(bp))
		rc = bnx2x_set_eth_mac(bp, true);
	else /* vf */
		rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
					   true);
	if (rc) {
		BNX2X_ERR("Setting Ethernet MAC failed\n");
		LOAD_ERROR_EXIT(bp, load_error3);
	}

	if (IS_PF(bp) && bp->pending_max) {
		bnx2x_update_max_mf_config(bp, bp->pending_max);
		bp->pending_max = 0;
	}

	if (bp->port.pmf) {
		rc = bnx2x_initial_phy_init(bp, load_mode);
		if (rc)
			LOAD_ERROR_EXIT(bp, load_error3);
	}
	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;

	/* Start fast path */

	/* Initialize Rx filter. */
	bnx2x_set_rx_mode_inner(bp);

	if (bp->flags & PTP_SUPPORTED) {
		bnx2x_init_ptp(bp);
		bnx2x_configure_ptp_filters(bp);
	}
	/* Start Tx */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only re-enabled */
		netif_tx_wake_all_queues(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_atomic();
		break;

	case LOAD_DIAG:
	case LOAD_LOOPBACK_EXT:
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (bp->port.pmf)
		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
	else
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	if (CNIC_ENABLED(bp))
		bnx2x_load_cnic(bp);

	if (IS_PF(bp))
		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);

	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
		/* mark driver is loaded in shmem2 */
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	/* Wait for all pending SP commands to complete */
	if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
		bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
		return -EBUSY;
	}

	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
		bnx2x_dcbx_init(bp, false);

	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");

	return 0;

#ifndef BNX2X_STOP_ON_ERROR
load_error3:
	if (IS_PF(bp)) {
		bnx2x_int_disable_sync(bp, 1);

		/* Clean queueable objects */
		bnx2x_squeeze_objects(bp);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (IS_PF(bp) && !BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
	bnx2x_del_all_napi(bp);

	/* clear pf_load status, as it was already set */
	if (IS_PF(bp))
		bnx2x_clear_pf_load(bp);
load_error0:
	bnx2x_free_fw_stats_mem(bp);
	bnx2x_free_fp_mem(bp);
	bnx2x_free_mem(bp);

	return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}

int bnx2x_drain_tx_queues(struct bnx2x *bp)
{
	u8 rc = 0, cos, i;

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		for_each_cos_in_tx_queue(fp, cos)
			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
		if (rc)
			return rc;
	}
	return 0;
}

/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
{
	int i;
	bool global = false;

	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");

	/* mark driver is unloaded in shmem2 */
	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
		u32 val;
		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
	}

	if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
	    (bp->state == BNX2X_STATE_CLOSED ||
	     bp->state == BNX2X_STATE_ERROR)) {
		/* We can get here if the driver has been unloaded
		 * during parity error recovery and is either waiting for a
		 * leader to complete or for other functions to unload and
		 * then ifdown has been issued. In this case we want to
		 * unload and let other functions complete a recovery
		 * process.
		 */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_leader_lock(bp);
		smp_mb();

		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
		BNX2X_ERR("Can't unload in closed or error state\n");
		return -EINVAL;
	}

	/* Nothing to do during unload if previous bnx2x_nic_load()
	 * has not completed successfully - all resources are released.
	 *
	 * we can get here only after unsuccessful ndo_* callback, during which
	 * dev->IFF_UP flag is still on.
	 */
	if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
		return 0;

	/* It's important to set the bp->state to the value different from
	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
	 */
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
	smp_mb();

	/* indicate to VFs that the PF is going down */
	bnx2x_iov_channel_down(bp);

	if (CNIC_LOADED(bp))
		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);

	/* Stop Tx */
	bnx2x_tx_disable(bp);
	netdev_reset_tc(bp->dev);

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	del_timer_sync(&bp->timer);

	if (IS_PF(bp)) {
		/* Set ALWAYS_ALIVE bit in shmem */
		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
		bnx2x_drv_pulse(bp);
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_save_statistics(bp);
	}

	/* wait till consumers catch up with producers in all queues */
	bnx2x_drain_tx_queues(bp);

	/* if VF indicate to PF this function is going down (PF will delete sp
	 * elements and clear initializations)
	 */
	if (IS_VF(bp))
		bnx2x_vfpf_close_vf(bp);
	else if (unload_mode != UNLOAD_RECOVERY)
		/* if this is a normal/close unload need to clean up chip*/
		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
	else {
		/* Send the UNLOAD_REQUEST to the MCP */
		bnx2x_send_unload_req(bp, unload_mode);

		/* Prevent transactions to host from the functions on the
		 * engine that doesn't reset global blocks in case of global
		 * attention once global blocks are reset and gates are opened
		 * (the engine which leader will perform the recovery
		 * last).
		 */
		if (!CHIP_IS_E1x(bp))
			bnx2x_pf_disable(bp);

		/* Disable HW interrupts, NAPI */
		bnx2x_netif_stop(bp, 1);
		/* Delete all NAPI objects */
		bnx2x_del_all_napi(bp);
		if (CNIC_LOADED(bp))
			bnx2x_del_all_napi_cnic(bp);
		/* Release IRQs */
		bnx2x_free_irq(bp);

		/* Report UNLOAD_DONE to MCP */
		bnx2x_send_unload_done(bp, false);
	}

	/*
	 * At this stage no more interrupts will arrive so we may safely clean
	 * the queueable objects here in case they failed to get cleaned so far.
	 */
	if (IS_PF(bp))
		bnx2x_squeeze_objects(bp);

	/* There should be no more pending SP commands at this stage */
	bp->sp_state = 0;

	bp->port.pmf = 0;

	/* clear pending work in rtnl task */
	bp->sp_rtnl_state = 0;
	smp_mb();

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	if (CNIC_LOADED(bp))
		bnx2x_free_skbs_cnic(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_fp_mem(bp);
	if (CNIC_LOADED(bp))
		bnx2x_free_fp_mem_cnic(bp);

	if (IS_PF(bp)) {
		if (CNIC_LOADED(bp))
			bnx2x_free_mem_cnic(bp);
	}
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;
	bp->cnic_loaded = false;

	/* Clear driver version indication in shmem */
	if (IS_PF(bp))
		bnx2x_update_mng_version(bp);

	/* Check if there are pending parity attentions. If there are - set
	 * RECOVERY_IN_PROGRESS.
	 */
	if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
		bnx2x_set_reset_in_progress(bp);

		/* Set RESET_IS_GLOBAL if needed */
		if (global)
			bnx2x_set_reset_global(bp);
	}

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if (IS_PF(bp) &&
	    !bnx2x_clear_pf_load(bp) &&
	    bnx2x_reset_is_done(bp, BP_PATH(bp)))
		bnx2x_disable_close_the_gate(bp);

	DP(NETIF_MSG_IFUP, "Ending NIC unload\n");

	return 0;
}

int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pdev->pm_cap) {
		BNX2X_DEV_INFO("No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
		return -EINVAL;
	}
	return 0;
}

/*
 * net_device service functions
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	u8 cos;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif
		if (!bnx2x_fp_lock_napi(fp))
			return budget;

		for_each_cos_in_tx_queue(fp, cos)
			if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
				bnx2x_tx_int(bp, fp->txdata_ptr[cos]);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget) {
				bnx2x_fp_unlock_napi(fp);
				break;
			}
		}

		bnx2x_fp_unlock_napi(fp);

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {

			/* No need to update SB for FCoE L2 ring as long as
			 * it's connected to the default SB and the SB
			 * has been updated when NAPI was scheduled.
			 */
			if (IS_FCOE_FP(fp)) {
				napi_complete(napi);
				break;
			}
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				DP(NETIF_MSG_RX_STATUS,
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
int bnx2x_low_latency_recv(struct napi_struct *napi)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int found = 0;

	if ((bp->state == BNX2X_STATE_CLOSED) ||
	    (bp->state == BNX2X_STATE_ERROR) ||
	    (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
		return LL_FLUSH_FAILED;

	if (!bnx2x_fp_lock_poll(fp))
		return LL_FLUSH_BUSY;

	if (bnx2x_has_rx_work(fp))
		found = bnx2x_rx_int(fp, 4);

	bnx2x_fp_unlock_poll(fp);

	return found;
}
#endif

/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 */
static u16 bnx2x_tx_split(struct bnx2x *bp,
			  struct bnx2x_fp_txdata *txdata,
			  struct sw_tx_bd *tx_buf,
			  struct eth_tx_start_bd **tx_bd, u16 hlen,
			  u16 bd_prod)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}

#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	__sum16 tsum = (__force __sum16) csum;

	if (fix > 0)
		tsum = ~csum_fold(csum_sub((__force __wsum) csum,
				  csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		tsum = ~csum_fold(csum_add((__force __wsum) csum,
				  csum_partial(t_header, -fix, 0)));

	return bswab16(tsum);
}

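/* Illustrative semantics (hypothetical offsets): fix is the signed distance
 * between where the stack computed the checksum and the transport header the
 * HW expects (see SKB_CS_OFF at the caller). For fix > 0 the partial checksum
 * of the extra fix bytes preceding t_header is subtracted out; for fix < 0
 * the missing -fix bytes starting at t_header are added in. The result is
 * folded, complemented and byte-swapped into the __le16 the parse BD wants.
 */
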
static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;
	__u8 prot = 0;
	__be16 protocol;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	protocol = vlan_get_protocol(skb);
	if (protocol == htons(ETH_P_IPV6)) {
		rc = XMIT_CSUM_V6;
		prot = ipv6_hdr(skb)->nexthdr;
	} else {
		rc = XMIT_CSUM_V4;
		prot = ip_hdr(skb)->protocol;
	}

	if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
		if (inner_ip_hdr(skb)->version == 6) {
			rc |= XMIT_CSUM_ENC_V6;
			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		} else {
			rc |= XMIT_CSUM_ENC_V4;
			if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}
	if (prot == IPPROTO_TCP)
		rc |= XMIT_CSUM_TCP;

	if (skb_is_gso(skb)) {
		if (skb_is_gso_v6(skb)) {
			rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
			if (rc & XMIT_CSUM_ENC)
				rc |= XMIT_GSO_ENC_V6;
		} else {
			rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
			if (rc & XMIT_CSUM_ENC)
				rc |= XMIT_GSO_ENC_V4;
		}
	}

	return rc;
}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum  = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif

/**
 * bnx2x_set_pbd_gso - update PBD in GSO case.
 *
 * @skb:	packet skb
 * @pbd:	parse BD
 * @xmit_type:	xmit flags
 */
static void bnx2x_set_pbd_gso(struct sk_buff *skb,
			      struct eth_tx_parse_bd_e1x *pbd,
			      u32 xmit_type)
{
	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
	pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
	pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));

	if (xmit_type & XMIT_GSO_V4) {
		pbd->ip_id = bswab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr,
						   0, IPPROTO_TCP, 0));
	} else {
		pbd->tcp_pseudo_csum =
			bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0));
	}

	pbd->global_data |=
		cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
}

/**
 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 *
 * 57712/578xx related, when skb has encapsulation
 */
static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
				 u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_inner_transport_header(skb) +
			inner_tcp_hdrlen(skb) - skb->data;
	}

	/* We support checksum offload for TCP and UDP only.
	 * No need to pass the UDP header length - it's a constant.
	 */
	return skb_inner_transport_header(skb) +
		sizeof(struct udphdr) - skb->data;
}

/**
 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:			driver handle
 * @skb:		packet skb
 * @parsing_data:	data to be updated
 * @xmit_type:		xmit flags
 *
 * 57712/578xx related
 */
static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
				u32 *parsing_data, u32 xmit_type)
{
	*parsing_data |=
		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;

	if (xmit_type & XMIT_CSUM_TCP) {
		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;

		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
	}
	/* We support checksum offload for TCP and UDP only.
	 * No need to pass the UDP header length - it's a constant.
	 */
	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
}

/* set FW indication according to inner or outer protocols if tunneled */
static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
			       struct eth_tx_start_bd *tx_start_bd,
			       u32 xmit_type)
{
	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

	if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

	if (!(xmit_type & XMIT_CSUM_TCP))
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
}

/**
 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
 *
 * @bp:		driver handle
 * @skb:	packet skb
 * @pbd:	parse BD to be updated
 * @xmit_type:	xmit flags
 */
static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
			     struct eth_tx_parse_bd_e1x *pbd,
			     u32 xmit_type)
{
	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;

	/* for now NS flag is not used in Linux */
	pbd->global_data =
		cpu_to_le16(hlen |
			    ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			     ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));

	pbd->ip_hlen_w = (skb_transport_header(skb) -
			skb_network_header(skb)) >> 1;

	hlen += pbd->ip_hlen_w;

	/* We support checksum offload for TCP and UDP only */
	if (xmit_type & XMIT_CSUM_TCP)
		hlen += tcp_hdrlen(skb) / 2;
	else
		hlen += sizeof(struct udphdr) / 2;

	pbd->total_hlen_w = cpu_to_le16(hlen);
	hlen = hlen*2;

	if (xmit_type & XMIT_CSUM_TCP) {
		pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);

	} else {
		s8 fix = SKB_CS_OFF(skb); /* signed! */

		DP(NETIF_MSG_TX_QUEUED,
		   "hlen %d  fix %d  csum before fix %x\n",
		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));

		/* HW bug: fixup the CSUM */
		pbd->tcp_pseudo_csum =
			bnx2x_csum_fix(skb_transport_header(skb),
				       SKB_CS(skb), fix);

		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
		   pbd->tcp_pseudo_csum);
	}

	return hlen;
}

static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
				      struct eth_tx_parse_bd_e2 *pbd_e2,
				      struct eth_tx_parse_2nd_bd *pbd2,
				      u16 *global_data,
				      u32 xmit_type)
{
	u16 hlen_w = 0;
	u8 outerip_off, outerip_len = 0;

	/* from outer IP to transport */
	hlen_w = (skb_inner_transport_header(skb) -
		  skb_network_header(skb)) >> 1;

	/* transport len */
	hlen_w += inner_tcp_hdrlen(skb) >> 1;

	pbd2->fw_ip_hdr_to_payload_w = hlen_w;

	/* outer IP header info */
	if (xmit_type & XMIT_CSUM_V4) {
		struct iphdr *iph = ip_hdr(skb);
		u32 csum = (__force u32)(~iph->check) -
			   (__force u32)iph->tot_len -
			   (__force u32)iph->frag_off;

		outerip_len = iph->ihl << 1;

		pbd2->fw_ip_csum_wo_len_flags_frag =
			bswab16(csum_fold((__force __wsum)csum));
	} else {
		pbd2->fw_ip_hdr_to_payload_w =
			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
		pbd_e2->data.tunnel_data.flags |=
			ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
	}

	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);

	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));

	/* inner IP header info */
	if (xmit_type & XMIT_CSUM_ENC_V4) {
		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);

		pbd_e2->data.tunnel_data.pseudo_csum =
			bswab16(~csum_tcpudp_magic(
					inner_ip_hdr(skb)->saddr,
					inner_ip_hdr(skb)->daddr,
					0, IPPROTO_TCP, 0));
	} else {
		pbd_e2->data.tunnel_data.pseudo_csum =
			bswab16(~csum_ipv6_magic(
					&inner_ipv6_hdr(skb)->saddr,
					&inner_ipv6_hdr(skb)->daddr,
					0, IPPROTO_TCP, 0));
	}

	outerip_off = (skb_network_header(skb) - skb->data) >> 1;

	*global_data |=
		outerip_off |
		(outerip_len <<
			ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
			ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);

	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
		pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
	}
}

static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
					 u32 xmit_type)
{
	struct ipv6hdr *ipv6;

	if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
		return;

	if (xmit_type & XMIT_GSO_ENC_V6)
		ipv6 = inner_ipv6_hdr(skb);
	else /* XMIT_GSO_V6 */
		ipv6 = ipv6_hdr(skb);

	if (ipv6->nexthdr == NEXTHDR_IPV6)
		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	struct netdev_queue *txq;
	struct bnx2x_fp_txdata *txdata;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd, *first_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
	struct eth_tx_parse_2nd_bd *pbd2 = NULL;
	u32 pbd_e2_parsing_data = 0;
	u16 pkt_prod, bd_prod;
	int nbd, txq_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	txq_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, txq_index);

	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));

	txdata = &bp->bnx2x_txq[txq_index];

	/* enable this debug print to view the transmission queue being used
	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
	   txq_index, fp_index, txdata_index); */

	/* enable this debug print to view the transmission details
	DP(NETIF_MSG_TX_QUEUED,
	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
	   txdata->cid, fp_index, txdata_index, txdata, fp); */

	if (unlikely(bnx2x_tx_avail(bp, txdata) <
			skb_shinfo(skb)->nr_frags +
			BDS_PER_TX_PKT +
			NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
		/* Handle special storage cases separately */
		if (txdata->tx_ring_size == 0) {
			struct bnx2x_eth_q_stats *q_stats =
				bnx2x_fp_qstats(bp, txdata->parent_fp);
			q_stats->driver_filtered_tx_pkt++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED,
	   "queue[%d]: SKB: summed %x  protocol %x  protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
	   skb->len);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED,
			   "SKB linearization failed - silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif
	/* Map skb linear data for DMA */
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		DP(NETIF_MSG_TX_QUEUED,
		   "SKB mapping failed - silently dropping this SKB\n");
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	/* get current pkt produced now - advance it just before sending packet
	 * since mapping of pages may fail and cause packet to be dropped
	 */
	pkt_prod = txdata->tx_pkt_prod;
	bd_prod = TX_BD(txdata->tx_bd_prod);

	/* get a tx_buf and first BD
	 * tx_start_bd may be changed during SPLIT,
	 * but first_bd will always stay first
	 */
	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
	first_bd = tx_start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		if (!(bp->flags & TX_TIMESTAMPING_EN)) {
			BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
		} else if (bp->ptp_tx_skb) {
			BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
		} else {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			/* schedule check for Tx timestamp */
			bp->ptp_tx_skb = skb_get(skb);
			bp->ptp_tx_start = jiffies;
			schedule_work(&bp->ptp_task);
		}
	}

	/* header nbd: indirectly zero other flags! */
	tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;

	/* remember the first BD of the packet */
	tx_buf->first_bd = txdata->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);

	if (skb_vlan_tag_present(skb)) {
		tx_start_bd->vlan_or_ethertype =
		    cpu_to_le16(skb_vlan_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |=
		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
	} else {
		/* when transmitting in a vf, start bd must hold the ethertype
		 * for fw to enforce it
		 */
#ifndef BNX2X_STOP_ON_ERROR
		if (IS_VF(bp))
#endif
			tx_start_bd->vlan_or_ethertype =
				cpu_to_le16(ntohs(eth->h_proto));
#ifndef BNX2X_STOP_ON_ERROR
		else
			/* used by FW for packet accounting */
			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
#endif
	}

	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	if (xmit_type & XMIT_CSUM)
		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);

	if (!CHIP_IS_E1x(bp)) {
		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));

		if (xmit_type & XMIT_CSUM_ENC) {
			u16 global_data = 0;

			/* Set PBD in enc checksum offload case */
			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
						      &pbd_e2_parsing_data,
						      xmit_type);

			/* turn on 2nd parsing and get a BD */
			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

			pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;

			memset(pbd2, 0, sizeof(*pbd2));

			pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
				(skb_inner_network_header(skb) -
				 skb->data) >> 1;

			if (xmit_type & XMIT_GSO_ENC)
				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
							  &global_data,
							  xmit_type);

			pbd2->global_data = cpu_to_le16(global_data);

			/* add addition parse BD indication to start BD */
			SET_FLAG(tx_start_bd->general_data,
				 ETH_TX_START_BD_PARSE_NBDS, 1);
			/* set encapsulation flag in start BD */
			SET_FLAG(tx_start_bd->general_data,
				 ETH_TX_START_BD_TUNNEL_EXIST, 1);

			tx_buf->flags |= BNX2X_HAS_SECOND_PBD;

			nbd++;
		} else if (xmit_type & XMIT_CSUM) {
			/* Set PBD in checksum offload case w/o encapsulation */
			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
						     &pbd_e2_parsing_data,
						     xmit_type);
		}

		bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
		/* Add the macs to the parsing BD if this is a vf or if
		 * Tx Switching is enabled.
		 */
		if (IS_VF(bp)) {
			/* override GRE parameters in BD */
			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
					      &pbd_e2->data.mac_addr.src_mid,
					      &pbd_e2->data.mac_addr.src_lo,
					      eth->h_source);

			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
					      &pbd_e2->data.mac_addr.dst_mid,
					      &pbd_e2->data.mac_addr.dst_lo,
					      eth->h_dest);
		} else {
			if (bp->flags & TX_SWITCHING)
				bnx2x_set_fw_mac_addr(
						&pbd_e2->data.mac_addr.dst_hi,
						&pbd_e2->data.mac_addr.dst_mid,
						&pbd_e2->data.mac_addr.dst_lo,
						eth->h_dest);
#ifdef BNX2X_STOP_ON_ERROR
			/* Enforce security is always set in Stop on Error -
			 * source mac should be present in the parsing BD
			 */
			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
					      &pbd_e2->data.mac_addr.src_mid,
					      &pbd_e2->data.mac_addr.src_lo,
					      eth->h_source);
#endif
		}

		SET_FLAG(pbd_e2_parsing_data,
			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
	} else {
		u16 global_data = 0;
		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
		/* Set PBD in checksum offload case */
		if (xmit_type & XMIT_CSUM)
			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);

		SET_FLAG(global_data,
			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
		pbd_e1x->global_data |= cpu_to_le16(global_data);
	}

	/* Setup the data pointer of the first BD of the packet */
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED,
	   "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen)) {
			nbd++;
			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
						 &tx_start_bd, hlen,
						 bd_prod);
		}
		if (!CHIP_IS_E1x(bp))
			pbd_e2_parsing_data |=
				(skb_shinfo(skb)->gso_size <<
				 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
				 ETH_TX_PARSE_BD_E2_LSO_MSS;
		else
			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
	}

	/* Set the PBD's parsing_data field if not zero
	 * (for the chips newer than 57711).
	 */
	if (pbd_e2_parsing_data)
		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);

	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	/* Handle fragmented skb */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
			unsigned int pkts_compl = 0, bytes_compl = 0;

			DP(NETIF_MSG_TX_QUEUED,
			   "Unable to map page - dropping packet...\n");

			/* we need unmap all buffers already mapped
			 * for this SKB;
			 * first_bd->nbd need to be properly updated
			 * before call to bnx2x_free_tx_pkt
			 */
			first_bd->nbd = cpu_to_le16(nbd);
			bnx2x_free_tx_pkt(bp, txdata,
					  TX_BD(txdata->tx_pkt_prod),
					  &pkts_compl, &bytes_compl);
			return NETDEV_TX_OK;
		}

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
		le16_add_cpu(&pkt_size, skb_frag_size(frag));
		nbd++;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	/* update with actual num BDs */
	first_bd->nbd = cpu_to_le16(nbd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in a non-LSO
	 * case, when we much more care about them.
	 */
	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd_e1x)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
		   le16_to_cpu(pbd_e1x->total_hlen_w));
	if (pbd_e2)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
		   pbd_e2,
		   pbd_e2->data.mac_addr.dst_hi,
		   pbd_e2->data.mac_addr.dst_mid,
		   pbd_e2->data.mac_addr.dst_lo,
		   pbd_e2->data.mac_addr.src_hi,
		   pbd_e2->data.mac_addr.src_mid,
		   pbd_e2->data.mac_addr.src_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	txdata->tx_pkt_prod++;
	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	txdata->tx_db.data.prod += nbd;
	barrier();

	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);

	mmiowb();

	txdata->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons */
		smp_mb();

		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
			netif_tx_wake_queue(txq);
	}
	txdata->tx_pkt++;

	return NETDEV_TX_OK;
}

/**
 * bnx2x_setup_tc - routine to configure net_device for multi tc
 *
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 *
 * callback connected to the ndo_setup_tc function pointer
 */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
{
	int cos, prio, count, offset;
	struct bnx2x *bp = netdev_priv(dev);

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* no traffic classes requested. Aborting */
	if (!num_tc) {
		netdev_reset_tc(dev);
		return 0;
	}

	/* requested to support too many traffic classes */
	if (num_tc > bp->max_cos) {
		BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
			  num_tc, bp->max_cos);
		return -EINVAL;
	}

	/* declare amount of supported traffic classes */
	if (netdev_set_num_tc(dev, num_tc)) {
		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
		return -EINVAL;
	}

	/* configure priority to traffic class mapping */
	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping priority %d to tc %d\n",
		   prio, bp->prio_to_cos[prio]);
	}

	/* Use this configuration to differentiate tc0 from other COSes
	   This can be used for ets or pfc, and save the effort of setting
	   up a multi class queue disc or negotiating DCBX with a switch
	netdev_set_prio_tc_map(dev, 0, 0);
	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
	for (prio = 1; prio < 16; prio++) {
		netdev_set_prio_tc_map(dev, prio, 1);
		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
	} */

	/* configure traffic class to transmission queue mapping */
	for (cos = 0; cos < bp->max_cos; cos++) {
		count = BNX2X_NUM_ETH_QUEUES(bp);
		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
		netdev_set_tc_queue(dev, cos, count, offset);
		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
		   "mapping tc %d to offset %d count %d\n",
		   cos, offset, count);
	}

	return 0;
}

/* called with rtnl_lock */
int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data)) {
		BNX2X_ERR("Requested MAC address is not valid\n");
		return -EINVAL;
	}

	if (IS_MF_STORAGE_ONLY(bp)) {
		BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
		return -EINVAL;
	}

	if (netif_running(dev)) {
		rc = bnx2x_set_eth_mac(bp, false);
		if (rc)
			return rc;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (netif_running(dev))
		rc = bnx2x_set_eth_mac(bp, true);

	return rc;
}

static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
{
	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
	u8 cos;

	/* Common */

	if (IS_FCOE_IDX(fp_index)) {
		memset(sb, 0, sizeof(union host_hc_status_block));
		fp->status_blk_mapping = 0;
	} else {
		/* status blocks */
		if (!CHIP_IS_E1x(bp))
			BNX2X_PCI_FREE(sb->e2_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(sb->e1x_sb,
				       bnx2x_fp(bp, fp_index,
						status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}

	/* Rx */
	if (!skip_rx_queue(bp, fp_index)) {
		bnx2x_free_rx_bds(fp);

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	if (!skip_tx_queue(bp, fp_index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFDOWN,
			   "freeing tx memory of fp %d cos %d cid %d\n",
			   fp_index, cos, txdata->cid);

			BNX2X_FREE(txdata->tx_buf_ring);
			BNX2X_PCI_FREE(txdata->tx_desc_ring,
				       txdata->tx_desc_mapping,
				       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
		}
	}
	/* end of fastpath */
}

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
{
	int i;

	for_each_cnic_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

void bnx2x_free_fp_mem(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_free_fp_mem_at(bp, i);
}

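/* Cache pointers into the chip-specific (E1x vs. E2) status block layout so
 * the fast path can read the index and running values without dispatching
 * on chip type each time.
 */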
static void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (!CHIP_IS_E1x(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}

/* Returns the number of actually allocated BDs */
static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
			      int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i, failure_cnt = 0;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during fp init so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
			failure_cnt++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - failure_cnt));
	}

	if (failure_cnt)
		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
			  i - failure_cnt, fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
				 cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;

	return i - failure_cnt;
}

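/* The RCQ is a chain of pages: the last CQE slot of each page holds a
 * "next page" link. Write the DMA address of the following page (wrapping
 * back to the first page after the last) into each of those link entries.
 */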
static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}

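/* Allocate the status block plus Tx/Rx ring memory for one fastpath. The Rx
 * ring size comes from the configured bp->rx_ring_size when set; otherwise
 * it is derived from MAX_RX_AVAIL per queue and raised to the FW-required
 * minimum. A partial allocation is kept only if enough BDs were obtained;
 * otherwise the queue's memory is released again.
 */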
static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
{
	union host_hc_status_block *sb;
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int ring_size = 0;
	u8 cos;
	int rx_ring_size = 0;

	if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
		rx_ring_size = MIN_RX_SIZE_NONTPA;
		bp->rx_ring_size = rx_ring_size;
	} else if (!bp->rx_ring_size) {
		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);

		if (CHIP_IS_E3(bp)) {
			u32 cfg = SHMEM_RD(bp,
					   dev_info.port_hw_config[BP_PORT(bp)].
					   default_cfg);

			/* Decrease ring size for 1G functions */
			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
				rx_ring_size /= 10;
		}

		/* allocate at least number of buffers required by FW */
		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
				     MIN_RX_SIZE_TPA, rx_ring_size);

		bp->rx_ring_size = rx_ring_size;
	} else /* if rx_ring_size specified - use it */
		rx_ring_size = bp->rx_ring_size;

	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);

	/* Common */
	sb = &bnx2x_fp(bp, index, status_blk);

	if (!IS_FCOE_IDX(index)) {
		/* status blocks */
		if (!CHIP_IS_E1x(bp)) {
			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						    sizeof(struct host_hc_status_block_e2));
			if (!sb->e2_sb)
				goto alloc_mem_err;
		} else {
			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
						     sizeof(struct host_hc_status_block_e1x));
			if (!sb->e1x_sb)
				goto alloc_mem_err;
		}
	}

	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
	 * set shortcuts for it.
	 */
	if (!IS_FCOE_IDX(index))
		set_sb_shortcuts(bp, index);

	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
						      sizeof(struct sw_tx_bd),
						      GFP_KERNEL);
			if (!txdata->tx_buf_ring)
				goto alloc_mem_err;
			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
			if (!txdata->tx_desc_ring)
				goto alloc_mem_err;
		}
	}

	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		bnx2x_fp(bp, index, rx_buf_ring) =
			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_buf_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_desc_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
					sizeof(struct eth_rx_bd) * NUM_RX_BD);
		if (!bnx2x_fp(bp, index, rx_desc_ring))
			goto alloc_mem_err;

		/* Seed all CQEs by 1s */
		bnx2x_fp(bp, index, rx_comp_ring) =
			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
		if (!bnx2x_fp(bp, index, rx_comp_ring))
			goto alloc_mem_err;

		/* SGE ring */
		bnx2x_fp(bp, index, rx_page_ring) =
			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
				GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_page_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_sge_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		if (!bnx2x_fp(bp, index, rx_sge_ring))
			goto alloc_mem_err;
		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;

/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if queue is not big enough,
	 * In these cases we disable the queue
	 * Min size is different for OOO, TPA and non-TPA queues
	 */
	if (ring_size < (fp->disable_tpa ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
			/* release memory allocated for this queue */
			bnx2x_free_fp_mem_at(bp, index);
			return -ENOMEM;
	}
	return 0;
}

static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	if (!NO_FCOE(bp))
		/* FCoE */
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail load process instead of mark
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;

	return 0;
}

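/* Illustration of the failure handling in the function below (hypothetical
 * numbers): if 8 ETH queues were requested but allocation stops at i == 5,
 * delta is 3; the last three fastpaths are dropped and the CNIC/FCoE
 * fastpath is moved down by delta so the fp array stays contiguous.
 */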
static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non eth FPs next to last eth FP
			 * must be done in that order
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move FCoE fp even if NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}

void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}

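/* Allocate the per-adapter arrays (fastpaths with TPA info, sp objects,
 * per-queue stats, Tx queue descriptors, MSI-X table, ILT). Sizes are
 * derived from the maximal RSS count plus CNIC support, so a later change
 * of the active queue count fits without reallocation.
 */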
int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}

	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}

int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}

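/* Select the active external PHY index: when link is up, prefer EXT_PHY1
 * unless a SERDES link points at a fibre-capable EXT_PHY2; when link is
 * down, fall back to the configured PHY selection priority.
 */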
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
	/*
	 * The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to reverse
	 * the configuration
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}

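/* This is the driver's ndo_fix_features callback: it drops feature bits the
 * device cannot honour in its current state (e.g. LRO/GRO without Rx
 * checksum offload) before the stack commits them.
 */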
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		netdev_features_t changed = dev->features ^ features;

		/* Revert the requested changes in features if they
		 * would require internal reload of PF in bnx2x_set_features().
		 */
		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
			features &= ~NETIF_F_RXCSUM;
			features |= dev->features & NETIF_F_RXCSUM;
		}

		if (changed & NETIF_F_LOOPBACK) {
			features &= ~NETIF_F_LOOPBACK;
			features |= dev->features & NETIF_F_LOOPBACK;
		}
	}

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM)) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	/* Note: do not disable SW GRO in kernel when HW GRO is off */
	if (bp->disable_tpa)
		features &= ~NETIF_F_LRO;

	return features;
}

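/* ndo_set_features counterpart: translate the committed LRO/GRO/loopback
 * feature bits into driver flags and reload the NIC only when a change
 * actually requires it.
 */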
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	u32 changes;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	/* VFs or non SRIOV PFs should be able to change loopback feature */
	if (!pci_num_vf(bp->pdev)) {
		if (features & NETIF_F_LOOPBACK) {
			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
				bp->link_params.loopback_mode = LOOPBACK_BMAC;
				bnx2x_reload = true;
			}
		} else {
			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
				bp->link_params.loopback_mode = LOOPBACK_NONE;
				bnx2x_reload = true;
			}
		}
	}

	changes = flags ^ bp->flags;

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
		changes &= ~GRO_ENABLE_FLAG;

	/* if GRO is changed while HW TPA is off, don't force a reload */
	if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa)
		changes &= ~GRO_ENABLE_FLAG;

	if (changes)
		bnx2x_reload = true;

	bp->flags = flags;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}

void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}

int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
		return -EPERM;
	}

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}

static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	/* clear and set the HC-enabled bit */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}

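/* Example for the function below (assuming the usual BNX2X_BTR of 4):
 * usec == 48 programs 12 timeout ticks; usec == 0 forces the disable flag
 * on so the index never fires, which is what the `usec ? 0 : 1` term
 * implements.
 */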
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}

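/* Set a deferred-work flag and kick the sp_rtnl task. The memory barriers
 * pair with the reader in the sp_rtnl worker so the flag is visible before
 * the work runs. Typical call site (see bnx2x_tx_timeout above):
 *
 *	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
 */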
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);