bnx2x: new Multi-function mode - BD
[deliverable/linux.git] / drivers / net / ethernet / broadcom / bnx2x / bnx2x_cmn.c
4ad79e13 1/* bnx2x_cmn.c: QLogic Everest network driver.
9f6c9258 2 *
247fa82b 3 * Copyright (c) 2007-2013 Broadcom Corporation
 4 * Copyright (c) 2014 QLogic Corporation
 5 * All rights reserved
 6 *
 7 * This program is free software; you can redistribute it and/or modify
 8 * it under the terms of the GNU General Public License as published by
 9 * the Free Software Foundation.
 10 *
08f6dd89 11 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 12 * Written by: Eliezer Tamir
 13 * Based on code from Michael Chan's bnx2 driver
 14 * UDP CSUM errata workaround by Arik Gendelman
 15 * Slowpath and fastpath rework by Vladislav Zolotarov
 16 * Statistics and Link management by Yitchak Gertner
 17 *
 18 */
19
f1deab50
JP
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
9f6c9258 22#include <linux/etherdevice.h>
9bcc0893 23#include <linux/if_vlan.h>
a6b7a407 24#include <linux/interrupt.h>
9f6c9258 25#include <linux/ip.h>
c9931896 26#include <linux/crash_dump.h>
9969085e 27#include <net/tcp.h>
f2e0899f 28#include <net/ipv6.h>
7f3e01fe 29#include <net/ip6_checksum.h>
076bb0c8 30#include <net/busy_poll.h>
c0cba59e 31#include <linux/prefetch.h>
9f6c9258 32#include "bnx2x_cmn.h"
523224a3 33#include "bnx2x_init.h"
042181f5 34#include "bnx2x_sp.h"
9f6c9258 35
a8f47eb7 36static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
37static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
38static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
39static int bnx2x_poll(struct napi_struct *napi, int budget);
40
41static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42{
43 int i;
44
45 /* Add NAPI objects */
46 for_each_rx_queue_cnic(bp, i) {
47 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
48 bnx2x_poll, NAPI_POLL_WEIGHT);
49 napi_hash_add(&bnx2x_fp(bp, i, napi));
50 }
51}
52
53static void bnx2x_add_all_napi(struct bnx2x *bp)
54{
55 int i;
56
57 /* Add NAPI objects */
58 for_each_eth_queue(bp, i) {
59 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
60 bnx2x_poll, NAPI_POLL_WEIGHT);
61 napi_hash_add(&bnx2x_fp(bp, i, napi));
62 }
63}
64
65static int bnx2x_calc_num_queues(struct bnx2x *bp)
66{
7d0445d6 67 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
ff2ad307
MS
68
69 /* Reduce memory usage in kdump environment by using only one queue */
c9931896 70 if (is_kdump_kernel())
ff2ad307
MS
71 nq = 1;
72
7d0445d6
MS
73 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
74 return nq;
a8f47eb7 75}
76
b3b83c3f
DK
77/**
78 * bnx2x_move_fp - move content of the fastpath structure.
79 *
80 * @bp: driver handle
81 * @from: source FP index
82 * @to: destination FP index
83 *
 84 * Makes sure the contents of bp->fp[to].napi are kept
 85 * intact. This is done by first copying the napi struct from
 86 * the target to the source, and then memcpy'ing the entire
 87 * source onto the target. Txdata pointers and related
 88 * content are updated as well.
89 */
90static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
91{
92 struct bnx2x_fastpath *from_fp = &bp->fp[from];
93 struct bnx2x_fastpath *to_fp = &bp->fp[to];
15192a8c
BW
94 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
95 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
96 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
97 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
65565884
MS
98 int old_max_eth_txqs, new_max_eth_txqs;
99 int old_txdata_index = 0, new_txdata_index = 0;
34d5626a 100 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
72754080
AE
101
102 /* Copy the NAPI object as it has been already initialized */
103 from_fp->napi = to_fp->napi;
104
b3b83c3f
DK
105 /* Move bnx2x_fastpath contents */
106 memcpy(to_fp, from_fp, sizeof(*to_fp));
107 to_fp->index = to;
65565884 108
34d5626a
YM
109 /* Retain the tpa_info of the original `to' version as we don't want
110 * 2 FPs to contain the same tpa_info pointer.
111 */
112 to_fp->tpa_info = old_tpa_info;
113
15192a8c
BW
114 /* move sp_objs contents as well, as their indices match fp ones */
115 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
116
117 /* move fp_stats contents as well, as their indices match fp ones */
118 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
119
65565884
MS
120 /* Update txdata pointers in fp and move txdata content accordingly:
121 * Each fp consumes 'max_cos' txdata structures, so the index should be
122 * decremented by max_cos x delta.
123 */
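	/* Worked example (values assumed for illustration, not taken from the
	 * source): with max_cos = 3 and the FCoE fastpath moving from index 8
	 * to index 6 (delta = 2), old_txdata_index - new_txdata_index below
	 * works out to 3 * 2 = 6 slots in bp->bnx2x_txq.
	 */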
124
125 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
126 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
127 (bp)->max_cos;
128 if (from == FCOE_IDX(bp)) {
129 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
130 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
131 }
132
4864a16a
YM
133 memcpy(&bp->bnx2x_txq[new_txdata_index],
134 &bp->bnx2x_txq[old_txdata_index],
65565884
MS
135 sizeof(struct bnx2x_fp_txdata));
136 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
b3b83c3f
DK
137}
138
8ca5e17e
AE
139/**
140 * bnx2x_fill_fw_str - Fill buffer with FW version string.
141 *
142 * @bp: driver handle
143 * @buf: character buffer to fill with the fw name
144 * @buf_len: length of the above buffer
145 *
146 */
147void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
148{
149 if (IS_PF(bp)) {
150 u8 phy_fw_ver[PHY_FW_VER_LEN];
151
152 phy_fw_ver[0] = '\0';
153 bnx2x_get_ext_phy_fw_version(&bp->link_params,
154 phy_fw_ver, PHY_FW_VER_LEN);
155 strlcpy(buf, bp->fw_ver, buf_len);
156 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
157 "bc %d.%d.%d%s%s",
158 (bp->common.bc_ver & 0xff0000) >> 16,
159 (bp->common.bc_ver & 0xff00) >> 8,
160 (bp->common.bc_ver & 0xff),
161 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
162 } else {
6411280a 163 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
8ca5e17e
AE
164 }
165}
166
4864a16a
YM
167/**
168 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
169 *
170 * @bp: driver handle
171 * @delta: number of eth queues which were not allocated
172 */
173static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
174{
175 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
176
 177 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
16a5fd92 178 * backward along the array could cause memory to be overwritten
 179 */
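	/* Illustration (numbers assumed): with old_eth_num = 8, delta = 2 and
	 * max_cos = 3, the txdata entry for cos = 1, queue i = 0 is copied to
	 * index 1 * (8 - 2) + 0 = 6 of bp->bnx2x_txq by the loop below.
	 */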
180 for (cos = 1; cos < bp->max_cos; cos++) {
181 for (i = 0; i < old_eth_num - delta; i++) {
182 struct bnx2x_fastpath *fp = &bp->fp[i];
183 int new_idx = cos * (old_eth_num - delta) + i;
184
185 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
186 sizeof(struct bnx2x_fp_txdata));
187 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
188 }
189 }
190}
191
a8f47eb7 192int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
619c5cb6 193
9f6c9258
DK
194/* free skb in the packet ring at pos idx
195 * return idx of last bd freed
196 */
6383c0b3 197static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
2df1a70a
TH
198 u16 idx, unsigned int *pkts_compl,
199 unsigned int *bytes_compl)
9f6c9258 200{
6383c0b3 201 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
9f6c9258
DK
202 struct eth_tx_start_bd *tx_start_bd;
203 struct eth_tx_bd *tx_data_bd;
204 struct sk_buff *skb = tx_buf->skb;
205 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
206 int nbd;
95e92fd4 207 u16 split_bd_len = 0;
9f6c9258
DK
208
209 /* prefetch skb end pointer to speedup dev_kfree_skb() */
210 prefetch(&skb->end);
211
51c1a580 212 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 213 txdata->txq_index, idx, tx_buf, skb);
9f6c9258 214
6383c0b3 215 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
9f6c9258
DK
216
217 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
218#ifdef BNX2X_STOP_ON_ERROR
219 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
220 BNX2X_ERR("BAD nbd!\n");
221 bnx2x_panic();
222 }
223#endif
224 new_cons = nbd + tx_buf->first_bd;
225
226 /* Get the next bd */
227 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
228
229 /* Skip a parse bd... */
230 --nbd;
231 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
232
fe26566d
DK
233 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
234 /* Skip second parse bd... */
235 --nbd;
236 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
237 }
238
95e92fd4 239 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
9f6c9258 240 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
95e92fd4
MS
241 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
242 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
9f6c9258
DK
243 --nbd;
244 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
245 }
246
95e92fd4
MS
247 /* unmap first bd */
248 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
249 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
250 DMA_TO_DEVICE);
251
9f6c9258
DK
252 /* now free frags */
253 while (nbd > 0) {
254
6383c0b3 255 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
9f6c9258
DK
256 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
257 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
258 if (--nbd)
259 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
260 }
261
262 /* release skb */
263 WARN_ON(!skb);
d8290ae5 264 if (likely(skb)) {
2df1a70a
TH
265 (*pkts_compl)++;
266 (*bytes_compl) += skb->len;
267 }
d8290ae5 268
40955532 269 dev_kfree_skb_any(skb);
9f6c9258
DK
270 tx_buf->first_bd = 0;
271 tx_buf->skb = NULL;
272
273 return new_cons;
274}
275
6383c0b3 276int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 277{
9f6c9258 278 struct netdev_queue *txq;
6383c0b3 279 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
2df1a70a 280 unsigned int pkts_compl = 0, bytes_compl = 0;
9f6c9258
DK
281
282#ifdef BNX2X_STOP_ON_ERROR
283 if (unlikely(bp->panic))
284 return -1;
285#endif
286
6383c0b3
AE
287 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
288 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
289 sw_cons = txdata->tx_pkt_cons;
9f6c9258
DK
290
291 while (sw_cons != hw_cons) {
292 u16 pkt_cons;
293
294 pkt_cons = TX_BD(sw_cons);
295
51c1a580
MS
296 DP(NETIF_MSG_TX_DONE,
297 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
6383c0b3 298 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
9f6c9258 299
2df1a70a 300 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
2de67439 301 &pkts_compl, &bytes_compl);
2df1a70a 302
9f6c9258
DK
303 sw_cons++;
304 }
305
2df1a70a
TH
306 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
307
6383c0b3
AE
308 txdata->tx_pkt_cons = sw_cons;
309 txdata->tx_bd_cons = bd_cons;
9f6c9258
DK
310
311 /* Need to make the tx_bd_cons update visible to start_xmit()
312 * before checking for netif_tx_queue_stopped(). Without the
313 * memory barrier, there is a small possibility that
314 * start_xmit() will miss it and cause the queue to be stopped
315 * forever.
619c5cb6
VZ
316 * On the other hand we need an rmb() here to ensure the proper
317 * ordering of bit testing in the following
318 * netif_tx_queue_stopped(txq) call.
9f6c9258
DK
319 */
320 smp_mb();
321
9f6c9258 322 if (unlikely(netif_tx_queue_stopped(txq))) {
16a5fd92 323 /* Taking tx_lock() is needed to prevent re-enabling the queue
 324 * while it's empty. This could have happened if rx_action() gets
325 * suspended in bnx2x_tx_int() after the condition before
326 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
327 *
328 * stops the queue->sees fresh tx_bd_cons->releases the queue->
329 * sends some packets consuming the whole queue again->
330 * stops the queue
331 */
332
333 __netif_tx_lock(txq, smp_processor_id());
334
335 if ((netif_tx_queue_stopped(txq)) &&
336 (bp->state == BNX2X_STATE_OPEN) &&
7df2dc6b 337 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
9f6c9258
DK
338 netif_tx_wake_queue(txq);
339
340 __netif_tx_unlock(txq);
341 }
342 return 0;
343}
344
345static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
346 u16 idx)
347{
348 u16 last_max = fp->last_max_sge;
349
350 if (SUB_S16(idx, last_max) > 0)
351 fp->last_max_sge = idx;
352}
353
621b4d66
DK
354static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
355 u16 sge_len,
356 struct eth_end_agg_rx_cqe *cqe)
9f6c9258
DK
357{
358 struct bnx2x *bp = fp->bp;
9f6c9258
DK
359 u16 last_max, last_elem, first_elem;
360 u16 delta = 0;
361 u16 i;
362
363 if (!sge_len)
364 return;
365
366 /* First mark all used pages */
367 for (i = 0; i < sge_len; i++)
619c5cb6 368 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
621b4d66 369 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
9f6c9258
DK
370
371 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
621b4d66 372 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
373
374 /* Here we assume that the last SGE index is the biggest */
375 prefetch((void *)(fp->sge_mask));
523224a3 376 bnx2x_update_last_max_sge(fp,
621b4d66 377 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
378
379 last_max = RX_SGE(fp->last_max_sge);
619c5cb6
VZ
380 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
381 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
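	/* For illustration (assuming 64-bit mask elements, i.e. a shift of 6):
	 * an SGE index of 200 maps to mask element 200 >> 6 = 3.
	 */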
9f6c9258
DK
382
383 /* If ring is not full */
384 if (last_elem + 1 != first_elem)
385 last_elem++;
386
387 /* Now update the prod */
388 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
389 if (likely(fp->sge_mask[i]))
390 break;
391
619c5cb6
VZ
392 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
393 delta += BIT_VEC64_ELEM_SZ;
9f6c9258
DK
394 }
395
396 if (delta > 0) {
397 fp->rx_sge_prod += delta;
398 /* clear page-end entries */
399 bnx2x_clear_sge_mask_next_elems(fp);
400 }
401
402 DP(NETIF_MSG_RX_STATUS,
403 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
404 fp->last_max_sge, fp->rx_sge_prod);
405}
406
2de67439 407/* Get Toeplitz hash value in the skb using the value from the
e52fcb24
ED
408 * CQE (calculated by HW).
409 */
410static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
a334b5fb 411 const struct eth_fast_path_rx_cqe *cqe,
5495ab75 412 enum pkt_hash_types *rxhash_type)
e52fcb24 413{
2de67439 414 /* Get Toeplitz hash from CQE */
e52fcb24 415 if ((bp->dev->features & NETIF_F_RXHASH) &&
a334b5fb
ED
416 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
417 enum eth_rss_hash_type htype;
418
419 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
5495ab75
TH
420 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
421 (htype == TCP_IPV6_HASH_TYPE)) ?
422 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
423
e52fcb24 424 return le32_to_cpu(cqe->rss_hash_result);
a334b5fb 425 }
5495ab75 426 *rxhash_type = PKT_HASH_TYPE_NONE;
e52fcb24
ED
427 return 0;
428}
429
9f6c9258 430static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
e52fcb24 431 u16 cons, u16 prod,
619c5cb6 432 struct eth_fast_path_rx_cqe *cqe)
9f6c9258
DK
433{
434 struct bnx2x *bp = fp->bp;
435 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
436 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
437 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
438 dma_addr_t mapping;
619c5cb6
VZ
439 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
440 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 441
619c5cb6
VZ
442 /* print error if current state != stop */
443 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
9f6c9258
DK
444 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
445
e52fcb24 446 /* Try to map an empty data buffer from the aggregation info */
619c5cb6 447 mapping = dma_map_single(&bp->pdev->dev,
e52fcb24 448 first_buf->data + NET_SKB_PAD,
619c5cb6
VZ
449 fp->rx_buf_size, DMA_FROM_DEVICE);
450 /*
451 * ...if it fails - move the skb from the consumer to the producer
452 * and set the current aggregation state as ERROR to drop it
453 * when TPA_STOP arrives.
454 */
455
456 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
457 /* Move the BD from the consumer to the producer */
e52fcb24 458 bnx2x_reuse_rx_data(fp, cons, prod);
619c5cb6
VZ
459 tpa_info->tpa_state = BNX2X_TPA_ERROR;
460 return;
461 }
9f6c9258 462
e52fcb24
ED
463 /* move empty data from pool to prod */
464 prod_rx_buf->data = first_buf->data;
619c5cb6 465 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 466 /* point prod_bd to new data */
9f6c9258
DK
467 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
468 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
469
619c5cb6
VZ
470 /* move partial skb from cons to pool (don't unmap yet) */
471 *first_buf = *cons_rx_buf;
472
473 /* mark bin state as START */
474 tpa_info->parsing_flags =
475 le16_to_cpu(cqe->pars_flags.flags);
476 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
477 tpa_info->tpa_state = BNX2X_TPA_START;
478 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
479 tpa_info->placement_offset = cqe->placement_offset;
5495ab75 480 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
621b4d66
DK
481 if (fp->mode == TPA_MODE_GRO) {
482 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
924d75ab 483 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
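		/* full_page is SGE_PAGES rounded down to a whole number of
		 * gro_size segments; e.g. (purely illustrative numbers) with
		 * SGE_PAGES = 4096 and gro_size = 1448: 4096 / 1448 * 1448 = 2896.
		 */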
621b4d66
DK
484 tpa_info->gro_size = gro_size;
485 }
619c5cb6 486
9f6c9258
DK
487#ifdef BNX2X_STOP_ON_ERROR
488 fp->tpa_queue_used |= (1 << queue);
9f6c9258 489 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
9f6c9258
DK
490 fp->tpa_queue_used);
491#endif
492}
493
e4e3c02a
VZ
494/* Timestamp option length allowed for TPA aggregation:
495 *
496 * nop nop kind length echo val
497 */
498#define TPA_TSTAMP_OPT_LEN 12
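/* Breakdown of the 12 bytes (per the layout above): nop (1) + nop (1) +
 * kind (1) + length (1) + echo (4) + val (4).
 */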
499/**
cbf1de72 500 * bnx2x_set_gro_params - compute GRO values
e4e3c02a 501 *
cbf1de72 502 * @skb: packet skb
e8920674
DK
503 * @parsing_flags: parsing flags from the START CQE
504 * @len_on_bd: total length of the first packet for the
505 * aggregation.
cbf1de72 506 * @pkt_len: length of all segments
e8920674
DK
507 *
 508 * Approximate value of the MSS for this aggregation, calculated using
 509 * its first packet.
2de67439 510 * Also compute the number of aggregated segments and the gso_type.
e4e3c02a 511 */
cbf1de72 512static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
ab5777d7
YM
513 u16 len_on_bd, unsigned int pkt_len,
514 u16 num_of_coalesced_segs)
e4e3c02a 515{
cbf1de72 516 /* TPA aggregation won't have either IP options or TCP options
619c5cb6 517 * other than timestamp or IPv6 extension headers.
e4e3c02a 518 */
619c5cb6
VZ
519 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
520
521 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
cbf1de72 522 PRS_FLAG_OVERETH_IPV6) {
619c5cb6 523 hdrs_len += sizeof(struct ipv6hdr);
cbf1de72
YM
524 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
525 } else {
619c5cb6 526 hdrs_len += sizeof(struct iphdr);
cbf1de72
YM
527 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
528 }
e4e3c02a
VZ
529
 530 /* Check if there was a TCP timestamp; if there is one, it will
 531 * always be 12 bytes long: nop nop kind length echo val.
532 *
533 * Otherwise FW would close the aggregation.
534 */
535 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
536 hdrs_len += TPA_TSTAMP_OPT_LEN;
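	/* Worked example (for illustration only): an IPv4 aggregation carrying
	 * TCP timestamps gives hdrs_len = ETH_HLEN (14) + iphdr (20) +
	 * tcphdr (20) + TPA_TSTAMP_OPT_LEN (12) = 66, so gso_size below becomes
	 * len_on_bd - 66.
	 */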
537
cbf1de72
YM
538 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
539
540 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
541 * to skb_shinfo(skb)->gso_segs
542 */
ab5777d7 543 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
e4e3c02a
VZ
544}
545
996dedba
MS
546static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
547 u16 index, gfp_t gfp_mask)
1191cb83 548{
1191cb83
ED
549 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
550 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
4cace675 551 struct bnx2x_alloc_pool *pool = &fp->page_pool;
1191cb83
ED
552 dma_addr_t mapping;
553
4cace675 554 if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
1191cb83 555
4cace675
GKB
556 /* put page reference used by the memory pool, since we
557 * won't be using this page as the mempool anymore.
558 */
559 if (pool->page)
560 put_page(pool->page);
561
562 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
563 if (unlikely(!pool->page)) {
564 BNX2X_ERR("Can't alloc sge\n");
565 return -ENOMEM;
566 }
567
4cace675 568 pool->offset = 0;
1191cb83
ED
569 }
570
8031612d
MS
571 mapping = dma_map_page(&bp->pdev->dev, pool->page,
572 pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
573 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
574 BNX2X_ERR("Can't map sge\n");
575 return -ENOMEM;
576 }
577
4cace675
GKB
578 get_page(pool->page);
579 sw_buf->page = pool->page;
580 sw_buf->offset = pool->offset;
581
1191cb83
ED
582 dma_unmap_addr_set(sw_buf, mapping, mapping);
583
584 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
585 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
586
4cace675
GKB
587 pool->offset += SGE_PAGE_SIZE;
588
1191cb83
ED
589 return 0;
590}
591
9f6c9258 592static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
621b4d66
DK
593 struct bnx2x_agg_info *tpa_info,
594 u16 pages,
595 struct sk_buff *skb,
619c5cb6
VZ
596 struct eth_end_agg_rx_cqe *cqe,
597 u16 cqe_idx)
9f6c9258
DK
598{
599 struct sw_rx_page *rx_pg, old_rx_pg;
621b4d66
DK
600 u32 i, frag_len, frag_size;
601 int err, j, frag_id = 0;
619c5cb6 602 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 603 u16 full_page = 0, gro_size = 0;
9f6c9258 604
619c5cb6 605 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
621b4d66
DK
606
607 if (fp->mode == TPA_MODE_GRO) {
608 gro_size = tpa_info->gro_size;
609 full_page = tpa_info->full_page;
610 }
9f6c9258
DK
611
612 /* This is needed in order to enable forwarding support */
cbf1de72
YM
613 if (frag_size)
614 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
ab5777d7
YM
615 le16_to_cpu(cqe->pkt_len),
616 le16_to_cpu(cqe->num_of_coalesced_segs));
621b4d66 617
9f6c9258 618#ifdef BNX2X_STOP_ON_ERROR
924d75ab 619 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
9f6c9258
DK
620 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
621 pages, cqe_idx);
619c5cb6 622 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
623 bnx2x_panic();
624 return -EINVAL;
625 }
626#endif
627
628 /* Run through the SGL and compose the fragmented skb */
629 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 630 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
631
632 /* FW gives the indices of the SGE as if the ring is an array
633 (meaning that "next" element will consume 2 indices) */
621b4d66
DK
634 if (fp->mode == TPA_MODE_GRO)
635 frag_len = min_t(u32, frag_size, (u32)full_page);
636 else /* LRO */
924d75ab 637 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
621b4d66 638
9f6c9258
DK
639 rx_pg = &fp->rx_page_ring[sge_idx];
640 old_rx_pg = *rx_pg;
641
642 /* If we fail to allocate a substitute page, we simply stop
643 where we are and drop the whole packet */
996dedba 644 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
9f6c9258 645 if (unlikely(err)) {
15192a8c 646 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
647 return err;
648 }
649
8031612d
MS
650 dma_unmap_page(&bp->pdev->dev,
651 dma_unmap_addr(&old_rx_pg, mapping),
652 SGE_PAGE_SIZE, DMA_FROM_DEVICE);
9f6c9258 653 /* Add one frag and update the appropriate fields in the skb */
621b4d66 654 if (fp->mode == TPA_MODE_LRO)
4cace675
GKB
655 skb_fill_page_desc(skb, j, old_rx_pg.page,
656 old_rx_pg.offset, frag_len);
621b4d66
DK
657 else { /* GRO */
658 int rem;
659 int offset = 0;
660 for (rem = frag_len; rem > 0; rem -= gro_size) {
661 int len = rem > gro_size ? gro_size : rem;
662 skb_fill_page_desc(skb, frag_id++,
4cace675
GKB
663 old_rx_pg.page,
664 old_rx_pg.offset + offset,
665 len);
621b4d66
DK
666 if (offset)
667 get_page(old_rx_pg.page);
668 offset += len;
669 }
670 }
9f6c9258
DK
671
672 skb->data_len += frag_len;
924d75ab 673 skb->truesize += SGE_PAGES;
9f6c9258
DK
674 skb->len += frag_len;
675
676 frag_size -= frag_len;
677 }
678
679 return 0;
680}
681
d46d132c
ED
682static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
683{
684 if (fp->rx_frag_size)
e51423d9 685 skb_free_frag(data);
d46d132c
ED
686 else
687 kfree(data);
688}
689
996dedba 690static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
d46d132c 691{
996dedba
MS
692 if (fp->rx_frag_size) {
693 /* GFP_KERNEL allocations are used only during initialization */
694 if (unlikely(gfp_mask & __GFP_WAIT))
695 return (void *)__get_free_page(gfp_mask);
696
d46d132c 697 return netdev_alloc_frag(fp->rx_frag_size);
996dedba 698 }
d46d132c 699
996dedba 700 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
d46d132c
ED
701}
702
9969085e
YM
703#ifdef CONFIG_INET
704static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
705{
706 const struct iphdr *iph = ip_hdr(skb);
707 struct tcphdr *th;
708
709 skb_set_transport_header(skb, sizeof(struct iphdr));
710 th = tcp_hdr(skb);
711
712 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
713 iph->saddr, iph->daddr, 0);
714}
715
716static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
717{
718 struct ipv6hdr *iph = ipv6_hdr(skb);
719 struct tcphdr *th;
720
721 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
722 th = tcp_hdr(skb);
723
724 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
725 &iph->saddr, &iph->daddr, 0);
726}
2c2d06d5
YM
727
728static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
729 void (*gro_func)(struct bnx2x*, struct sk_buff*))
730{
731 skb_set_network_header(skb, 0);
732 gro_func(bp, skb);
733 tcp_gro_complete(skb);
734}
9969085e
YM
735#endif
736
737static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
738 struct sk_buff *skb)
739{
740#ifdef CONFIG_INET
cbf1de72 741 if (skb_shinfo(skb)->gso_size) {
9969085e
YM
742 switch (be16_to_cpu(skb->protocol)) {
743 case ETH_P_IP:
2c2d06d5 744 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
9969085e
YM
745 break;
746 case ETH_P_IPV6:
2c2d06d5 747 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
9969085e
YM
748 break;
749 default:
2c2d06d5 750 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
9969085e
YM
751 be16_to_cpu(skb->protocol));
752 }
9969085e
YM
753 }
754#endif
60e66fee 755 skb_record_rx_queue(skb, fp->rx_queue);
9969085e
YM
756 napi_gro_receive(&fp->napi, skb);
757}
758
1191cb83
ED
759static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
760 struct bnx2x_agg_info *tpa_info,
761 u16 pages,
762 struct eth_end_agg_rx_cqe *cqe,
763 u16 cqe_idx)
9f6c9258 764{
619c5cb6 765 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 766 u8 pad = tpa_info->placement_offset;
619c5cb6 767 u16 len = tpa_info->len_on_bd;
e52fcb24 768 struct sk_buff *skb = NULL;
621b4d66 769 u8 *new_data, *data = rx_buf->data;
619c5cb6
VZ
770 u8 old_tpa_state = tpa_info->tpa_state;
771
772 tpa_info->tpa_state = BNX2X_TPA_STOP;
773
 774 /* If there was an error during the handling of the TPA_START -
775 * drop this aggregation.
776 */
777 if (old_tpa_state == BNX2X_TPA_ERROR)
778 goto drop;
779
e52fcb24 780 /* Try to allocate the new data */
996dedba 781 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
9f6c9258
DK
782 /* Unmap skb in the pool anyway, as we are going to change
783 pool entry status to BNX2X_TPA_STOP even if new skb allocation
784 fails. */
785 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 786 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 787 if (likely(new_data))
d46d132c 788 skb = build_skb(data, fp->rx_frag_size);
9f6c9258 789
e52fcb24 790 if (likely(skb)) {
9f6c9258 791#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 792 if (pad + len > fp->rx_buf_size) {
51c1a580 793 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 794 pad, len, fp->rx_buf_size);
9f6c9258
DK
795 bnx2x_panic();
796 return;
797 }
798#endif
799
e52fcb24 800 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 801 skb_put(skb, len);
5495ab75 802 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
9f6c9258
DK
803
804 skb->protocol = eth_type_trans(skb, bp->dev);
805 skb->ip_summed = CHECKSUM_UNNECESSARY;
806
621b4d66
DK
807 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
808 skb, cqe, cqe_idx)) {
619c5cb6 809 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
86a9bad3 810 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
9969085e 811 bnx2x_gro_receive(bp, fp, skb);
9f6c9258 812 } else {
51c1a580
MS
813 DP(NETIF_MSG_RX_STATUS,
814 "Failed to allocate new pages - dropping packet!\n");
40955532 815 dev_kfree_skb_any(skb);
9f6c9258
DK
816 }
817
e52fcb24
ED
818 /* put new data in bin */
819 rx_buf->data = new_data;
9f6c9258 820
619c5cb6 821 return;
9f6c9258 822 }
07b0f009
ED
823 if (new_data)
824 bnx2x_frag_free(fp, new_data);
619c5cb6
VZ
825drop:
826 /* drop the packet and keep the buffer in the bin */
827 DP(NETIF_MSG_RX_STATUS,
828 "Failed to allocate or map a new skb - dropping packet!\n");
15192a8c 829 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
830}
831
996dedba
MS
832static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
833 u16 index, gfp_t gfp_mask)
1191cb83
ED
834{
835 u8 *data;
836 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
837 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
838 dma_addr_t mapping;
839
996dedba 840 data = bnx2x_frag_alloc(fp, gfp_mask);
1191cb83
ED
841 if (unlikely(data == NULL))
842 return -ENOMEM;
843
844 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
845 fp->rx_buf_size,
846 DMA_FROM_DEVICE);
847 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
d46d132c 848 bnx2x_frag_free(fp, data);
1191cb83
ED
849 BNX2X_ERR("Can't map rx data\n");
850 return -ENOMEM;
851 }
852
853 rx_buf->data = data;
854 dma_unmap_addr_set(rx_buf, mapping, mapping);
855
856 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
857 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
858
859 return 0;
860}
861
15192a8c
BW
862static
863void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
864 struct bnx2x_fastpath *fp,
865 struct bnx2x_eth_q_stats *qstats)
d6cb3e41 866{
e488921f
MS
867 /* Do nothing if no L4 csum validation was done.
868 * We do not check whether IP csum was validated. For IPv4 we assume
869 * that if the card got as far as validating the L4 csum, it also
870 * validated the IP csum. IPv6 has no IP csum.
871 */
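	/* Consequence (illustrative): a packet flagged with a bad L4 checksum
	 * only bumps hw_csum_err and leaves ip_summed as CHECKSUM_NONE, so the
	 * stack verifies the checksum itself.
	 */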
d6cb3e41 872 if (cqe->fast_path_cqe.status_flags &
e488921f 873 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
d6cb3e41
ED
874 return;
875
e488921f 876 /* If L4 validation was done, check if an error was found. */
d6cb3e41
ED
877
878 if (cqe->fast_path_cqe.type_error_flags &
879 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
880 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
15192a8c 881 qstats->hw_csum_err++;
d6cb3e41
ED
882 else
883 skb->ip_summed = CHECKSUM_UNNECESSARY;
884}
9f6c9258 885
a8f47eb7 886static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
9f6c9258
DK
887{
888 struct bnx2x *bp = fp->bp;
889 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
75b29459 890 u16 sw_comp_cons, sw_comp_prod;
9f6c9258 891 int rx_pkt = 0;
75b29459
DK
892 union eth_rx_cqe *cqe;
893 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258
DK
894
895#ifdef BNX2X_STOP_ON_ERROR
896 if (unlikely(bp->panic))
897 return 0;
898#endif
b3529744
EB
899 if (budget <= 0)
900 return rx_pkt;
9f6c9258 901
9f6c9258
DK
902 bd_cons = fp->rx_bd_cons;
903 bd_prod = fp->rx_bd_prod;
904 bd_prod_fw = bd_prod;
905 sw_comp_cons = fp->rx_comp_cons;
906 sw_comp_prod = fp->rx_comp_prod;
907
75b29459
DK
908 comp_ring_cons = RCQ_BD(sw_comp_cons);
909 cqe = &fp->rx_comp_ring[comp_ring_cons];
910 cqe_fp = &cqe->fast_path_cqe;
9f6c9258
DK
911
912 DP(NETIF_MSG_RX_STATUS,
75b29459 913 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
9f6c9258 914
75b29459 915 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
9f6c9258
DK
916 struct sw_rx_bd *rx_buf = NULL;
917 struct sk_buff *skb;
9f6c9258 918 u8 cqe_fp_flags;
619c5cb6 919 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 920 u16 len, pad, queue;
e52fcb24 921 u8 *data;
bd5cef03 922 u32 rxhash;
5495ab75 923 enum pkt_hash_types rxhash_type;
9f6c9258 924
619c5cb6
VZ
925#ifdef BNX2X_STOP_ON_ERROR
926 if (unlikely(bp->panic))
927 return 0;
928#endif
929
9f6c9258
DK
930 bd_prod = RX_BD(bd_prod);
931 bd_cons = RX_BD(bd_cons);
932
9aaae044 933 /* A rmb() is required to ensure that the CQE is not read
934 * before it is written by the adapter DMA. PCI ordering
935 * rules will make sure the other fields are written before
936 * the marker at the end of struct eth_fast_path_rx_cqe
937 * but without rmb() a weakly ordered processor can process
938 * stale data. Without the barrier TPA state-machine might
939 * enter inconsistent state and kernel stack might be
940 * provided with incorrect packet description - these lead
 941 * to various kernel crashes.
942 */
943 rmb();
944
619c5cb6
VZ
945 cqe_fp_flags = cqe_fp->type_error_flags;
946 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 947
51c1a580
MS
948 DP(NETIF_MSG_RX_STATUS,
949 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
950 CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
951 cqe_fp_flags, cqe_fp->status_flags,
952 le32_to_cpu(cqe_fp->rss_hash_result),
621b4d66
DK
953 le16_to_cpu(cqe_fp->vlan_tag),
954 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
9f6c9258
DK
955
956 /* is this a slowpath msg? */
619c5cb6 957 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
958 bnx2x_sp_event(fp, cqe);
959 goto next_cqe;
e52fcb24 960 }
621b4d66 961
e52fcb24
ED
962 rx_buf = &fp->rx_buf_ring[bd_cons];
963 data = rx_buf->data;
9f6c9258 964
e52fcb24 965 if (!CQE_TYPE_FAST(cqe_fp_type)) {
621b4d66
DK
966 struct bnx2x_agg_info *tpa_info;
967 u16 frag_size, pages;
619c5cb6 968#ifdef BNX2X_STOP_ON_ERROR
e52fcb24 969 /* sanity check */
7e6b4d44 970 if (fp->mode == TPA_MODE_DISABLED &&
e52fcb24
ED
971 (CQE_TYPE_START(cqe_fp_type) ||
972 CQE_TYPE_STOP(cqe_fp_type)))
7e6b4d44 973 BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
e52fcb24 974 CQE_TYPE(cqe_fp_type));
619c5cb6 975#endif
9f6c9258 976
e52fcb24
ED
977 if (CQE_TYPE_START(cqe_fp_type)) {
978 u16 queue = cqe_fp->queue_index;
979 DP(NETIF_MSG_RX_STATUS,
980 "calling tpa_start on queue %d\n",
981 queue);
9f6c9258 982
e52fcb24
ED
983 bnx2x_tpa_start(fp, queue,
984 bd_cons, bd_prod,
985 cqe_fp);
621b4d66 986
e52fcb24 987 goto next_rx;
621b4d66
DK
988 }
989 queue = cqe->end_agg_cqe.queue_index;
990 tpa_info = &fp->tpa_info[queue];
991 DP(NETIF_MSG_RX_STATUS,
992 "calling tpa_stop on queue %d\n",
993 queue);
994
995 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
996 tpa_info->len_on_bd;
997
998 if (fp->mode == TPA_MODE_GRO)
999 pages = (frag_size + tpa_info->full_page - 1) /
1000 tpa_info->full_page;
1001 else
1002 pages = SGE_PAGE_ALIGN(frag_size) >>
1003 SGE_PAGE_SHIFT;
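			/* Illustration (assumed numbers): frag_size = 9000 with
			 * full_page = 2896 gives (9000 + 2895) / 2896 = 4 in GRO
			 * mode; LRO mode instead rounds frag_size up to whole
			 * SGE pages.
			 */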
1004
1005 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1006 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 1007#ifdef BNX2X_STOP_ON_ERROR
621b4d66
DK
1008 if (bp->panic)
1009 return 0;
9f6c9258
DK
1010#endif
1011
621b4d66
DK
1012 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1013 goto next_cqe;
e52fcb24
ED
1014 }
1015 /* non TPA */
621b4d66 1016 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
e52fcb24
ED
1017 pad = cqe_fp->placement_offset;
1018 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 1019 dma_unmap_addr(rx_buf, mapping),
e52fcb24
ED
1020 pad + RX_COPY_THRESH,
1021 DMA_FROM_DEVICE);
1022 pad += NET_SKB_PAD;
1023 prefetch(data + pad); /* speedup eth_type_trans() */
1024 /* is this an error packet? */
1025 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 1026 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
1027 "ERROR flags %x rx packet %u\n",
1028 cqe_fp_flags, sw_comp_cons);
15192a8c 1029 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
e52fcb24
ED
1030 goto reuse_rx;
1031 }
9f6c9258 1032
e52fcb24
ED
1033 /* Since we don't have a jumbo ring
1034 * copy small packets if mtu > 1500
1035 */
1036 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1037 (len <= RX_COPY_THRESH)) {
45abfb10 1038 skb = napi_alloc_skb(&fp->napi, len);
e52fcb24 1039 if (skb == NULL) {
51c1a580 1040 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24 1041 "ERROR packet dropped because of alloc failure\n");
15192a8c 1042 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
1043 goto reuse_rx;
1044 }
e52fcb24
ED
1045 memcpy(skb->data, data + pad, len);
1046 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1047 } else {
996dedba
MS
1048 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1049 GFP_ATOMIC) == 0)) {
9f6c9258 1050 dma_unmap_single(&bp->pdev->dev,
e52fcb24 1051 dma_unmap_addr(rx_buf, mapping),
a8c94b91 1052 fp->rx_buf_size,
9f6c9258 1053 DMA_FROM_DEVICE);
d46d132c 1054 skb = build_skb(data, fp->rx_frag_size);
e52fcb24 1055 if (unlikely(!skb)) {
d46d132c 1056 bnx2x_frag_free(fp, data);
15192a8c
BW
1057 bnx2x_fp_qstats(bp, fp)->
1058 rx_skb_alloc_failed++;
e52fcb24
ED
1059 goto next_rx;
1060 }
9f6c9258 1061 skb_reserve(skb, pad);
9f6c9258 1062 } else {
51c1a580
MS
1063 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1064 "ERROR packet dropped because of alloc failure\n");
15192a8c 1065 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258 1066reuse_rx:
e52fcb24 1067 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
9f6c9258
DK
1068 goto next_rx;
1069 }
036d2df9 1070 }
9f6c9258 1071
036d2df9
DK
1072 skb_put(skb, len);
1073 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 1074
036d2df9 1075 /* Set Toeplitz hash for a non-LRO skb */
5495ab75
TH
1076 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1077 skb_set_hash(skb, rxhash, rxhash_type);
9f6c9258 1078
036d2df9 1079 skb_checksum_none_assert(skb);
f85582f8 1080
d6cb3e41 1081 if (bp->dev->features & NETIF_F_RXCSUM)
15192a8c
BW
1082 bnx2x_csum_validate(skb, cqe, fp,
1083 bnx2x_fp_qstats(bp, fp));
9f6c9258 1084
f233cafe 1085 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 1086
eeed018c 1087 /* Check if this packet was timestamped */
56daf66d 1088 if (unlikely(cqe->fast_path_cqe.type_error_flags &
eeed018c
MK
1089 (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1090 bnx2x_set_rx_ts(bp, skb);
1091
619c5cb6
VZ
1092 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1093 PARSING_FLAGS_VLAN)
86a9bad3 1094 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
619c5cb6 1095 le16_to_cpu(cqe_fp->vlan_tag));
9f6c9258 1096
8b80cda5 1097 skb_mark_napi_id(skb, &fp->napi);
8f20aa57
DK
1098
1099 if (bnx2x_fp_ll_polling(fp))
1100 netif_receive_skb(skb);
1101 else
1102 napi_gro_receive(&fp->napi, skb);
9f6c9258 1103next_rx:
e52fcb24 1104 rx_buf->data = NULL;
9f6c9258
DK
1105
1106 bd_cons = NEXT_RX_IDX(bd_cons);
1107 bd_prod = NEXT_RX_IDX(bd_prod);
1108 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1109 rx_pkt++;
1110next_cqe:
1111 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1112 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1113
75b29459
DK
1114 /* mark CQE as free */
1115 BNX2X_SEED_CQE(cqe_fp);
1116
9f6c9258
DK
1117 if (rx_pkt == budget)
1118 break;
75b29459
DK
1119
1120 comp_ring_cons = RCQ_BD(sw_comp_cons);
1121 cqe = &fp->rx_comp_ring[comp_ring_cons];
1122 cqe_fp = &cqe->fast_path_cqe;
9f6c9258
DK
1123 } /* while */
1124
1125 fp->rx_bd_cons = bd_cons;
1126 fp->rx_bd_prod = bd_prod_fw;
1127 fp->rx_comp_cons = sw_comp_cons;
1128 fp->rx_comp_prod = sw_comp_prod;
1129
1130 /* Update producers */
1131 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1132 fp->rx_sge_prod);
1133
1134 fp->rx_pkt += rx_pkt;
1135 fp->rx_calls++;
1136
1137 return rx_pkt;
1138}
1139
1140static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1141{
1142 struct bnx2x_fastpath *fp = fp_cookie;
1143 struct bnx2x *bp = fp->bp;
6383c0b3 1144 u8 cos;
9f6c9258 1145
51c1a580
MS
1146 DP(NETIF_MSG_INTR,
1147 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3 1148 fp->index, fp->fw_sb_id, fp->igu_sb_id);
ecf01c22 1149
523224a3 1150 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
1151
1152#ifdef BNX2X_STOP_ON_ERROR
1153 if (unlikely(bp->panic))
1154 return IRQ_HANDLED;
1155#endif
1156
1157 /* Handle Rx and Tx according to MSI-X vector */
6383c0b3 1158 for_each_cos_in_tx_queue(fp, cos)
65565884 1159 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
6383c0b3 1160
523224a3 1161 prefetch(&fp->sb_running_index[SM_RX_ID]);
f5fbf115 1162 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
9f6c9258
DK
1163
1164 return IRQ_HANDLED;
1165}
1166
9f6c9258
DK
1167/* HW Lock for shared dual port PHYs */
1168void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1169{
1170 mutex_lock(&bp->port.phy_mutex);
1171
8203c4b6 1172 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1173}
1174
1175void bnx2x_release_phy_lock(struct bnx2x *bp)
1176{
8203c4b6 1177 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
1178
1179 mutex_unlock(&bp->port.phy_mutex);
1180}
1181
0793f83f
DK
1182/* calculates MF speed according to current linespeed and MF configuration */
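/* Worked example (values assumed for illustration): with line_speed = 10000
 * Mbps and maxCfg = 50, SI mode yields (10000 * 50) / 100 = 5000 Mbps, while
 * SD mode caps the speed at vn_max_rate = 50 * 100 = 5000 Mbps.
 */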
1183u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1184{
1185 u16 line_speed = bp->link_vars.line_speed;
1186 if (IS_MF(bp)) {
faa6fcbb
DK
1187 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1188 bp->mf_config[BP_VN(bp)]);
1189
1190 /* Calculate the current MAX line speed limit for the MF
1191 * devices
0793f83f 1192 */
faa6fcbb
DK
1193 if (IS_MF_SI(bp))
1194 line_speed = (line_speed * maxCfg) / 100;
1195 else { /* SD mode */
0793f83f
DK
1196 u16 vn_max_rate = maxCfg * 100;
1197
1198 if (vn_max_rate < line_speed)
1199 line_speed = vn_max_rate;
faa6fcbb 1200 }
0793f83f
DK
1201 }
1202
1203 return line_speed;
1204}
1205
2ae17f66
VZ
1206/**
1207 * bnx2x_fill_report_data - fill link report data to report
1208 *
1209 * @bp: driver handle
1210 * @data: link state to update
1211 *
 1212 * It uses non-atomic bit operations because it is called under the mutex.
1213 */
1191cb83
ED
1214static void bnx2x_fill_report_data(struct bnx2x *bp,
1215 struct bnx2x_link_report_data *data)
2ae17f66 1216{
2ae17f66
VZ
1217 memset(data, 0, sizeof(*data));
1218
6495d15a
DK
1219 if (IS_PF(bp)) {
1220 /* Fill the report data: effective line speed */
1221 data->line_speed = bnx2x_get_mf_speed(bp);
1222
1223 /* Link is down */
1224 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1225 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1226 &data->link_report_flags);
1227
1228 if (!BNX2X_NUM_ETH_QUEUES(bp))
1229 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1230 &data->link_report_flags);
1231
1232 /* Full DUPLEX */
1233 if (bp->link_vars.duplex == DUPLEX_FULL)
1234 __set_bit(BNX2X_LINK_REPORT_FD,
1235 &data->link_report_flags);
1236
1237 /* Rx Flow Control is ON */
1238 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1239 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1240 &data->link_report_flags);
1241
1242 /* Tx Flow Control is ON */
1243 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1244 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1245 &data->link_report_flags);
1246 } else { /* VF */
1247 *data = bp->vf_link_vars;
1248 }
2ae17f66
VZ
1249}
1250
1251/**
1252 * bnx2x_link_report - report link status to OS.
1253 *
1254 * @bp: driver handle
1255 *
1256 * Calls the __bnx2x_link_report() under the same locking scheme
1257 * as a link/PHY state managing code to ensure a consistent link
1258 * reporting.
1259 */
1260
9f6c9258
DK
1261void bnx2x_link_report(struct bnx2x *bp)
1262{
2ae17f66
VZ
1263 bnx2x_acquire_phy_lock(bp);
1264 __bnx2x_link_report(bp);
1265 bnx2x_release_phy_lock(bp);
1266}
9f6c9258 1267
2ae17f66
VZ
1268/**
1269 * __bnx2x_link_report - report link status to OS.
1270 *
1271 * @bp: driver handle
1272 *
16a5fd92 1273 * Non-atomic implementation.
2ae17f66
VZ
1274 * Should be called under the phy_lock.
1275 */
1276void __bnx2x_link_report(struct bnx2x *bp)
1277{
1278 struct bnx2x_link_report_data cur_data;
9f6c9258 1279
2ae17f66 1280 /* reread mf_cfg */
ad5afc89 1281 if (IS_PF(bp) && !CHIP_IS_E1(bp))
2ae17f66
VZ
1282 bnx2x_read_mf_cfg(bp);
1283
1284 /* Read the current link report info */
1285 bnx2x_fill_report_data(bp, &cur_data);
1286
1287 /* Don't report link down or exactly the same link status twice */
1288 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1289 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1290 &bp->last_reported_link.link_report_flags) &&
1291 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1292 &cur_data.link_report_flags)))
1293 return;
1294
1295 bp->link_cnt++;
9f6c9258 1296
2ae17f66
VZ
 1297 /* We are going to report new link parameters now -
1298 * remember the current data for the next time.
1299 */
1300 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 1301
6495d15a
DK
1302 /* propagate status to VFs */
1303 if (IS_PF(bp))
1304 bnx2x_iov_link_update(bp);
1305
2ae17f66
VZ
1306 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1307 &cur_data.link_report_flags)) {
1308 netif_carrier_off(bp->dev);
1309 netdev_err(bp->dev, "NIC Link is Down\n");
1310 return;
1311 } else {
94f05b0f
JP
1312 const char *duplex;
1313 const char *flow;
1314
2ae17f66 1315 netif_carrier_on(bp->dev);
9f6c9258 1316
2ae17f66
VZ
1317 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1318 &cur_data.link_report_flags))
94f05b0f 1319 duplex = "full";
9f6c9258 1320 else
94f05b0f 1321 duplex = "half";
9f6c9258 1322
2ae17f66
VZ
1323 /* Handle the FC at the end so that only these flags would be
1324 * possibly set. This way we may easily check if there is no FC
1325 * enabled.
1326 */
1327 if (cur_data.link_report_flags) {
1328 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1329 &cur_data.link_report_flags)) {
2ae17f66
VZ
1330 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1331 &cur_data.link_report_flags))
94f05b0f
JP
1332 flow = "ON - receive & transmit";
1333 else
1334 flow = "ON - receive";
9f6c9258 1335 } else {
94f05b0f 1336 flow = "ON - transmit";
9f6c9258 1337 }
94f05b0f
JP
1338 } else {
1339 flow = "none";
9f6c9258 1340 }
94f05b0f
JP
1341 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1342 cur_data.line_speed, duplex, flow);
9f6c9258
DK
1343 }
1344}
1345
1191cb83
ED
1346static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1347{
1348 int i;
1349
1350 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1351 struct eth_rx_sge *sge;
1352
1353 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1354 sge->addr_hi =
1355 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1356 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1357
1358 sge->addr_lo =
1359 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1360 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1361 }
1362}
1363
1364static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1365 struct bnx2x_fastpath *fp, int last)
1366{
1367 int i;
1368
1369 for (i = 0; i < last; i++) {
1370 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1371 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1372 u8 *data = first_buf->data;
1373
1374 if (data == NULL) {
1375 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1376 continue;
1377 }
1378 if (tpa_info->tpa_state == BNX2X_TPA_START)
1379 dma_unmap_single(&bp->pdev->dev,
1380 dma_unmap_addr(first_buf, mapping),
1381 fp->rx_buf_size, DMA_FROM_DEVICE);
d46d132c 1382 bnx2x_frag_free(fp, data);
1191cb83
ED
1383 first_buf->data = NULL;
1384 }
1385}
1386
55c11941
MS
1387void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1388{
1389 int j;
1390
1391 for_each_rx_queue_cnic(bp, j) {
1392 struct bnx2x_fastpath *fp = &bp->fp[j];
1393
1394 fp->rx_bd_cons = 0;
1395
1396 /* Activate BD ring */
1397 /* Warning!
1398 * this will generate an interrupt (to the TSTORM)
1399 * must only be done after chip is initialized
1400 */
1401 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1402 fp->rx_sge_prod);
1403 }
1404}
1405
9f6c9258
DK
1406void bnx2x_init_rx_rings(struct bnx2x *bp)
1407{
1408 int func = BP_FUNC(bp);
523224a3 1409 u16 ring_prod;
9f6c9258 1410 int i, j;
25141580 1411
b3b83c3f 1412 /* Allocate TPA resources */
55c11941 1413 for_each_eth_queue(bp, j) {
523224a3 1414 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1415
a8c94b91
VZ
1416 DP(NETIF_MSG_IFUP,
1417 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1418
7e6b4d44 1419 if (fp->mode != TPA_MODE_DISABLED) {
16a5fd92 1420 /* Fill the per-aggregation pool */
dfacf138 1421 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1422 struct bnx2x_agg_info *tpa_info =
1423 &fp->tpa_info[i];
1424 struct sw_rx_bd *first_buf =
1425 &tpa_info->first_buf;
1426
996dedba
MS
1427 first_buf->data =
1428 bnx2x_frag_alloc(fp, GFP_KERNEL);
e52fcb24 1429 if (!first_buf->data) {
51c1a580
MS
1430 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1431 j);
9f6c9258 1432 bnx2x_free_tpa_pool(bp, fp, i);
7e6b4d44 1433 fp->mode = TPA_MODE_DISABLED;
9f6c9258
DK
1434 break;
1435 }
619c5cb6
VZ
1436 dma_unmap_addr_set(first_buf, mapping, 0);
1437 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1438 }
523224a3
DK
1439
1440 /* "next page" elements initialization */
1441 bnx2x_set_next_page_sgl(fp);
1442
1443 /* set SGEs bit mask */
1444 bnx2x_init_sge_ring_bit_mask(fp);
1445
1446 /* Allocate SGEs and initialize the ring elements */
1447 for (i = 0, ring_prod = 0;
1448 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1449
996dedba
MS
1450 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1451 GFP_KERNEL) < 0) {
51c1a580
MS
1452 BNX2X_ERR("was only able to allocate %d rx sges\n",
1453 i);
1454 BNX2X_ERR("disabling TPA for queue[%d]\n",
1455 j);
523224a3 1456 /* Cleanup already allocated elements */
619c5cb6
VZ
1457 bnx2x_free_rx_sge_range(bp, fp,
1458 ring_prod);
1459 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1460 MAX_AGG_QS(bp));
7e6b4d44 1461 fp->mode = TPA_MODE_DISABLED;
523224a3
DK
1462 ring_prod = 0;
1463 break;
1464 }
1465 ring_prod = NEXT_SGE_IDX(ring_prod);
1466 }
1467
1468 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1469 }
1470 }
1471
55c11941 1472 for_each_eth_queue(bp, j) {
9f6c9258
DK
1473 struct bnx2x_fastpath *fp = &bp->fp[j];
1474
1475 fp->rx_bd_cons = 0;
9f6c9258 1476
b3b83c3f
DK
1477 /* Activate BD ring */
1478 /* Warning!
1479 * this will generate an interrupt (to the TSTORM)
1480 * must only be done after chip is initialized
1481 */
1482 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1483 fp->rx_sge_prod);
9f6c9258 1484
9f6c9258
DK
1485 if (j != 0)
1486 continue;
1487
619c5cb6 1488 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1489 REG_WR(bp, BAR_USTRORM_INTMEM +
1490 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1491 U64_LO(fp->rx_comp_mapping));
1492 REG_WR(bp, BAR_USTRORM_INTMEM +
1493 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1494 U64_HI(fp->rx_comp_mapping));
1495 }
9f6c9258
DK
1496 }
1497}
f85582f8 1498
55c11941 1499static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
9f6c9258 1500{
6383c0b3 1501 u8 cos;
55c11941 1502 struct bnx2x *bp = fp->bp;
9f6c9258 1503
55c11941
MS
1504 for_each_cos_in_tx_queue(fp, cos) {
1505 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1506 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1507
55c11941
MS
1508 u16 sw_prod = txdata->tx_pkt_prod;
1509 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1510
55c11941
MS
1511 while (sw_cons != sw_prod) {
1512 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1513 &pkts_compl, &bytes_compl);
1514 sw_cons++;
9f6c9258 1515 }
55c11941
MS
1516
1517 netdev_tx_reset_queue(
1518 netdev_get_tx_queue(bp->dev,
1519 txdata->txq_index));
1520 }
1521}
1522
1523static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1524{
1525 int i;
1526
1527 for_each_tx_queue_cnic(bp, i) {
1528 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1529 }
1530}
1531
1532static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1533{
1534 int i;
1535
1536 for_each_eth_queue(bp, i) {
1537 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
9f6c9258
DK
1538 }
1539}
1540
b3b83c3f
DK
1541static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1542{
1543 struct bnx2x *bp = fp->bp;
1544 int i;
1545
1546 /* ring wasn't allocated */
1547 if (fp->rx_buf_ring == NULL)
1548 return;
1549
1550 for (i = 0; i < NUM_RX_BD; i++) {
1551 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1552 u8 *data = rx_buf->data;
b3b83c3f 1553
e52fcb24 1554 if (data == NULL)
b3b83c3f 1555 continue;
b3b83c3f
DK
1556 dma_unmap_single(&bp->pdev->dev,
1557 dma_unmap_addr(rx_buf, mapping),
1558 fp->rx_buf_size, DMA_FROM_DEVICE);
1559
e52fcb24 1560 rx_buf->data = NULL;
d46d132c 1561 bnx2x_frag_free(fp, data);
b3b83c3f
DK
1562 }
1563}
1564
55c11941
MS
1565static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1566{
1567 int j;
1568
1569 for_each_rx_queue_cnic(bp, j) {
1570 bnx2x_free_rx_bds(&bp->fp[j]);
1571 }
1572}
1573
9f6c9258
DK
1574static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1575{
b3b83c3f 1576 int j;
9f6c9258 1577
55c11941 1578 for_each_eth_queue(bp, j) {
9f6c9258
DK
1579 struct bnx2x_fastpath *fp = &bp->fp[j];
1580
b3b83c3f 1581 bnx2x_free_rx_bds(fp);
9f6c9258 1582
7e6b4d44 1583 if (fp->mode != TPA_MODE_DISABLED)
dfacf138 1584 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1585 }
1586}
1587
a8f47eb7 1588static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
55c11941
MS
1589{
1590 bnx2x_free_tx_skbs_cnic(bp);
1591 bnx2x_free_rx_skbs_cnic(bp);
1592}
1593
9f6c9258
DK
1594void bnx2x_free_skbs(struct bnx2x *bp)
1595{
1596 bnx2x_free_tx_skbs(bp);
1597 bnx2x_free_rx_skbs(bp);
1598}
1599
e3835b99
DK
1600void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1601{
1602 /* load old values */
1603 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1604
1605 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1606 /* leave all but MAX value */
1607 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1608
1609 /* set new MAX value */
1610 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1611 & FUNC_MF_CFG_MAX_BW_MASK;
1612
1613 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1614 }
1615}
1616
ca92429f
DK
1617/**
1618 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1619 *
1620 * @bp: driver handle
1621 * @nvecs: number of vectors to be released
1622 */
1623static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1624{
ca92429f 1625 int i, offset = 0;
9f6c9258 1626
ca92429f
DK
1627 if (nvecs == offset)
1628 return;
ad5afc89
AE
1629
1630 /* VFs don't have a default SB */
1631 if (IS_PF(bp)) {
1632 free_irq(bp->msix_table[offset].vector, bp->dev);
1633 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1634 bp->msix_table[offset].vector);
1635 offset++;
1636 }
55c11941
MS
1637
1638 if (CNIC_SUPPORT(bp)) {
1639 if (nvecs == offset)
1640 return;
1641 offset++;
1642 }
ca92429f 1643
ec6ba945 1644 for_each_eth_queue(bp, i) {
ca92429f
DK
1645 if (nvecs == offset)
1646 return;
51c1a580
MS
1647 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1648 i, bp->msix_table[offset].vector);
9f6c9258 1649
ca92429f 1650 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1651 }
1652}
1653
d6214d7a 1654void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1655{
30a5de77 1656 if (bp->flags & USING_MSIX_FLAG &&
ad5afc89
AE
1657 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1658 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1659
1660 /* vfs don't have a default status block */
1661 if (IS_PF(bp))
1662 nvecs++;
1663
1664 bnx2x_free_msix_irqs(bp, nvecs);
1665 } else {
30a5de77 1666 free_irq(bp->dev->irq, bp->dev);
ad5afc89 1667 }
9f6c9258
DK
1668}
1669
0e8d2ec5 1670int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1671{
1ab4434c 1672 int msix_vec = 0, i, rc;
9f6c9258 1673
1ab4434c
AE
1674 /* VFs don't have a default status block */
1675 if (IS_PF(bp)) {
1676 bp->msix_table[msix_vec].entry = msix_vec;
1677 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1678 bp->msix_table[0].entry);
1679 msix_vec++;
1680 }
9f6c9258 1681
55c11941
MS
1682 /* Cnic requires an msix vector for itself */
1683 if (CNIC_SUPPORT(bp)) {
1684 bp->msix_table[msix_vec].entry = msix_vec;
1685 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1686 msix_vec, bp->msix_table[msix_vec].entry);
1687 msix_vec++;
1688 }
1689
6383c0b3 1690 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1691 for_each_eth_queue(bp, i) {
d6214d7a 1692 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1693 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1694 msix_vec, msix_vec, i);
d6214d7a 1695 msix_vec++;
9f6c9258
DK
1696 }
1697
1ab4434c
AE
1698 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1699 msix_vec);
d6214d7a 1700
a5444b17
AG
1701 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1702 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
9f6c9258
DK
1703 /*
1704 * reconfigure number of tx/rx queues according to available
1705 * MSI-X vectors
1706 */
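	/* Fallback logic summary (as implemented below): -ENOSPC -> retry with
	 * a single MSI-X vector; any other negative rc -> bail out to no_msix
	 * (MSI/INTx); fewer vectors than requested -> shrink the ETH queue
	 * count to match.
	 */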
a5444b17 1707 if (rc == -ENOSPC) {
30a5de77 1708 /* Get by with single vector */
a5444b17
AG
1709 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1710 if (rc < 0) {
30a5de77
DK
1711 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1712 rc);
1713 goto no_msix;
1714 }
1715
1716 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1717 bp->flags |= USING_SINGLE_MSIX_FLAG;
1718
55c11941
MS
1719 BNX2X_DEV_INFO("set number of queues to 1\n");
1720 bp->num_ethernet_queues = 1;
1721 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1722 } else if (rc < 0) {
a5444b17 1723 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1724 goto no_msix;
a5444b17
AG
1725 } else if (rc < msix_vec) {
1726 /* how many fewer vectors will we have? */
1727 int diff = msix_vec - rc;
1728
1729 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1730
1731 /*
1732 * decrease number of queues by number of unallocated entries
1733 */
1734 bp->num_ethernet_queues -= diff;
1735 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1736
1737 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1738 bp->num_queues);
9f6c9258
DK
1739 }
1740
1741 bp->flags |= USING_MSIX_FLAG;
1742
1743 return 0;
30a5de77
DK
1744
1745no_msix:
1746 /* fall back to INTx if not enough memory */
1747 if (rc == -ENOMEM)
1748 bp->flags |= DISABLE_MSI_FLAG;
1749
1750 return rc;
9f6c9258
DK
1751}
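/* Editor's note (illustrative, hypothetical numbers): a PF with CNIC
 * support and 8 ETH queues requests msix_vec = 1 + 1 + 8 = 10 vectors.
 * If pci_enable_msix_range() grants only 6, diff = 10 - 6 = 4, so
 * num_ethernet_queues drops from 8 to 4 and num_queues is recomputed.
 * On -ENOSPC the code retries with a single vector, and only if that
 * also fails does it fall back to MSI/INTx via the no_msix label.
 */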
1752
1753static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1754{
ca92429f 1755 int i, rc, offset = 0;
9f6c9258 1756
ad5afc89
AE
1757 /* no default status block for vf */
1758 if (IS_PF(bp)) {
1759 rc = request_irq(bp->msix_table[offset++].vector,
1760 bnx2x_msix_sp_int, 0,
1761 bp->dev->name, bp->dev);
1762 if (rc) {
1763 BNX2X_ERR("request sp irq failed\n");
1764 return -EBUSY;
1765 }
9f6c9258
DK
1766 }
1767
55c11941
MS
1768 if (CNIC_SUPPORT(bp))
1769 offset++;
1770
ec6ba945 1771 for_each_eth_queue(bp, i) {
9f6c9258
DK
1772 struct bnx2x_fastpath *fp = &bp->fp[i];
1773 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1774 bp->dev->name, i);
1775
d6214d7a 1776 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1777 bnx2x_msix_fp_int, 0, fp->name, fp);
1778 if (rc) {
ca92429f
DK
1779 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1780 bp->msix_table[offset].vector, rc);
1781 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1782 return -EBUSY;
1783 }
1784
d6214d7a 1785 offset++;
9f6c9258
DK
1786 }
1787
ec6ba945 1788 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1789 if (IS_PF(bp)) {
1790 offset = 1 + CNIC_SUPPORT(bp);
1791 netdev_info(bp->dev,
1792 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1793 bp->msix_table[0].vector,
1794 0, bp->msix_table[offset].vector,
1795 i - 1, bp->msix_table[offset + i - 1].vector);
1796 } else {
1797 offset = CNIC_SUPPORT(bp);
1798 netdev_info(bp->dev,
1799 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1800 0, bp->msix_table[offset].vector,
1801 i - 1, bp->msix_table[offset + i - 1].vector);
1802 }
9f6c9258
DK
1803 return 0;
1804}
1805
d6214d7a 1806int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1807{
1808 int rc;
1809
1810 rc = pci_enable_msi(bp->pdev);
1811 if (rc) {
51c1a580 1812 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1813 return -1;
1814 }
1815 bp->flags |= USING_MSI_FLAG;
1816
1817 return 0;
1818}
1819
1820static int bnx2x_req_irq(struct bnx2x *bp)
1821{
1822 unsigned long flags;
30a5de77 1823 unsigned int irq;
9f6c9258 1824
30a5de77 1825 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1826 flags = 0;
1827 else
1828 flags = IRQF_SHARED;
1829
30a5de77
DK
1830 if (bp->flags & USING_MSIX_FLAG)
1831 irq = bp->msix_table[0].vector;
1832 else
1833 irq = bp->pdev->irq;
1834
1835 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1836}
1837
c957d09f 1838static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1839{
1840 int rc = 0;
30a5de77
DK
1841 if (bp->flags & USING_MSIX_FLAG &&
1842 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1843 rc = bnx2x_req_msix_irqs(bp);
1844 if (rc)
1845 return rc;
1846 } else {
619c5cb6
VZ
1847 rc = bnx2x_req_irq(bp);
1848 if (rc) {
1849 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1850 return rc;
1851 }
1852 if (bp->flags & USING_MSI_FLAG) {
1853 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1854 netdev_info(bp->dev, "using MSI IRQ %d\n",
1855 bp->dev->irq);
1856 }
1857 if (bp->flags & USING_MSIX_FLAG) {
1858 bp->dev->irq = bp->msix_table[0].vector;
1859 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1860 bp->dev->irq);
619c5cb6
VZ
1861 }
1862 }
1863
1864 return 0;
1865}
1866
55c11941
MS
1867static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1868{
1869 int i;
1870
8f20aa57 1871 for_each_rx_queue_cnic(bp, i) {
074975d0 1872 bnx2x_fp_busy_poll_init(&bp->fp[i]);
55c11941 1873 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1874 }
55c11941
MS
1875}
1876
1191cb83 1877static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1878{
1879 int i;
1880
8f20aa57 1881 for_each_eth_queue(bp, i) {
074975d0 1882 bnx2x_fp_busy_poll_init(&bp->fp[i]);
9f6c9258 1883 napi_enable(&bnx2x_fp(bp, i, napi));
8f20aa57 1884 }
9f6c9258
DK
1885}
1886
55c11941
MS
1887static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1888{
1889 int i;
1890
8f20aa57 1891 for_each_rx_queue_cnic(bp, i) {
55c11941 1892 napi_disable(&bnx2x_fp(bp, i, napi));
9a2620c8
YM
1893 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1894 usleep_range(1000, 2000);
8f20aa57 1895 }
55c11941
MS
1896}
1897
1191cb83 1898static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1899{
1900 int i;
1901
8f20aa57 1902 for_each_eth_queue(bp, i) {
9f6c9258 1903 napi_disable(&bnx2x_fp(bp, i, napi));
9a2620c8
YM
1904 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1905 usleep_range(1000, 2000);
8f20aa57 1906 }
9f6c9258
DK
1907}
1908
1909void bnx2x_netif_start(struct bnx2x *bp)
1910{
4b7ed897
DK
1911 if (netif_running(bp->dev)) {
1912 bnx2x_napi_enable(bp);
55c11941
MS
1913 if (CNIC_LOADED(bp))
1914 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1915 bnx2x_int_enable(bp);
1916 if (bp->state == BNX2X_STATE_OPEN)
1917 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1918 }
1919}
1920
1921void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1922{
1923 bnx2x_int_disable_sync(bp, disable_hw);
1924 bnx2x_napi_disable(bp);
55c11941
MS
1925 if (CNIC_LOADED(bp))
1926 bnx2x_napi_disable_cnic(bp);
9f6c9258 1927}
9f6c9258 1928
f663dd9a 1929u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
99932d4f 1930 void *accel_priv, select_queue_fallback_t fallback)
8307fa3e 1931{
8307fa3e 1932 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1933
55c11941 1934 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1935 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1936 u16 ether_type = ntohs(hdr->h_proto);
1937
1938 /* Skip VLAN tag if present */
1939 if (ether_type == ETH_P_8021Q) {
1940 struct vlan_ethhdr *vhdr =
1941 (struct vlan_ethhdr *)skb->data;
1942
1943 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1944 }
1945
1946 /* If ethertype is FCoE or FIP - use FCoE ring */
1947 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1948 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1949 }
55c11941 1950
cdb9d6ae 1951 /* select a non-FCoE queue */
99932d4f 1952 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
8307fa3e
VZ
1953}
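/* Editor's note (illustrative): with CNIC loaded and FCoE enabled, an
 * FCoE or FIP frame (possibly behind an 802.1Q tag) is steered to the
 * dedicated FCoE Tx ring via bnx2x_fcoe_tx(); all other traffic is
 * hashed by the stack's fallback and folded into the ETH queue range
 * by the modulo above.
 */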
1954
d6214d7a
DK
1955void bnx2x_set_num_queues(struct bnx2x *bp)
1956{
96305234 1957 /* RSS queues */
55c11941 1958 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1959
a3348722 1960 /* override in STORAGE SD modes */
2e98ffc2 1961 if (IS_MF_STORAGE_ONLY(bp))
55c11941
MS
1962 bp->num_ethernet_queues = 1;
1963
ec6ba945 1964 /* Add special queues */
55c11941
MS
1965 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1966 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1967
1968 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1969}
1970
cdb9d6ae
VZ
1971/**
1972 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1973 *
1974 * @bp: Driver handle
1975 *
1976 * We currently support at most 16 Tx queues for each CoS, thus we will
1977 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1978 * bp->max_cos.
1979 *
1980 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1981 * index after all ETH L2 indices.
1982 *
1983 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1984 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
16a5fd92 1985 * 16..31,...) with indices that are not coupled with any real Tx queue.
cdb9d6ae
VZ
1986 *
1987 * The proper configuration of skb->queue_mapping is handled by
1988 * bnx2x_select_queue() and __skb_tx_hash().
1989 *
1990 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1991 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1992 */
55c11941 1993static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1994{
6383c0b3 1995 int rc, tx, rx;
ec6ba945 1996
65565884 1997 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1998 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1999
6383c0b3 2000/* account for fcoe queue */
55c11941
MS
2001 if (include_cnic && !NO_FCOE(bp)) {
2002 rx++;
2003 tx++;
6383c0b3 2004 }
6383c0b3
AE
2005
2006 rc = netif_set_real_num_tx_queues(bp->dev, tx);
2007 if (rc) {
2008 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
2009 return rc;
2010 }
2011 rc = netif_set_real_num_rx_queues(bp->dev, rx);
2012 if (rc) {
2013 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2014 return rc;
2015 }
2016
51c1a580 2017 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
2018 tx, rx);
2019
ec6ba945
VZ
2020 return rc;
2021}
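/* Editor's note (illustrative, hypothetical configuration): with 8 ETH
 * queues and bp->max_cos = 3, the function above announces tx = 8 * 3 = 24
 * and rx = 8 to the stack; when the FCoE L2 queue is included, both
 * counts grow by one.
 */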
2022
1191cb83 2023static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
2024{
2025 int i;
2026
2027 for_each_queue(bp, i) {
2028 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 2029 u32 mtu;
a8c94b91
VZ
2030
2031 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2032 if (IS_FCOE_IDX(i))
2033 /*
2034 * Although no IP frames are expected to arrive on
2035 * this ring, we still want to add an
2036 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2037 * overrun attack.
2038 */
e52fcb24 2039 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 2040 else
e52fcb24
ED
2041 mtu = bp->dev->mtu;
2042 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2043 IP_HEADER_ALIGNMENT_PADDING +
2044 ETH_OVREHEAD +
2045 mtu +
2046 BNX2X_FW_RX_ALIGN_END;
16a5fd92 2047 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
2048 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2049 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2050 else
2051 fp->rx_frag_size = 0;
a8c94b91
VZ
2052 }
2053}
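/* Editor's note (illustrative): rx_buf_size is the FW-visible buffer
 * (alignment start/end + IP padding + ETH overhead + MTU). The page-frag
 * allocator is used only when that buffer plus NET_SKB_PAD still fits in
 * one page; a standard 1500-byte MTU typically fits on a 4K-page system,
 * while a jumbo MTU does not, so rx_frag_size is then left at 0.
 */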
2054
60cad4e6 2055static int bnx2x_init_rss(struct bnx2x *bp)
619c5cb6
VZ
2056{
2057 int i;
619c5cb6
VZ
2058 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2059
16a5fd92 2060 /* Prepare the initial contents for the indirection table if RSS is
619c5cb6
VZ
2061 * enabled
2062 */
5d317c6a
MS
2063 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2064 bp->rss_conf_obj.ind_table[i] =
96305234
DK
2065 bp->fp->cl_id +
2066 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
2067
2068 /*
2069 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2070 * per-port, so if explicit configuration is needed, do it only
2071 * for a PMF.
2072 *
2073 * For 57712 and newer on the other hand it's a per-function
2074 * configuration.
2075 */
5d317c6a 2076 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
2077}
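/* Editor's note (illustrative): ethtool_rxfh_indir_default(i, n) is a
 * plain round-robin (i % n), so with 4 ETH queues the loop above fills
 * the indirection table with cl_id+0, cl_id+1, cl_id+2, cl_id+3,
 * cl_id+0, ... before it is pushed to the device by bnx2x_config_rss_eth().
 */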
2078
60cad4e6
AE
2079int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2080 bool config_hash, bool enable)
619c5cb6 2081{
3b603066 2082 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
2083
2084 /* Although RSS is meaningless when there is a single HW queue we
2085 * still need it enabled in order to have HW Rx hash generated.
2086 *
2087 * if (!is_eth_multi(bp))
2088 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2089 */
2090
96305234 2091 params.rss_obj = rss_obj;
619c5cb6
VZ
2092
2093 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2094
60cad4e6
AE
2095 if (enable) {
2096 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2097
2098 /* RSS configuration */
2099 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2100 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2101 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2102 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2103 if (rss_obj->udp_rss_v4)
2104 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2105 if (rss_obj->udp_rss_v6)
2106 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
e42780b6 2107
28311f8e
YM
2108 if (!CHIP_IS_E1x(bp)) {
2109 /* valid only for TUNN_MODE_VXLAN tunnel mode */
2110 __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2111 __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2112
e42780b6 2113 /* valid only for TUNN_MODE_GRE tunnel mode */
28311f8e
YM
2114 __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2115 }
60cad4e6
AE
2116 } else {
2117 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2118 }
619c5cb6 2119
96305234
DK
2120 /* Hash bits */
2121 params.rss_result_mask = MULTI_MASK;
619c5cb6 2122
5d317c6a 2123 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 2124
96305234
DK
2125 if (config_hash) {
2126 /* RSS keys */
e3ec69ca 2127 netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
96305234 2128 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
2129 }
2130
60cad4e6
AE
2131 if (IS_PF(bp))
2132 return bnx2x_config_rss(bp, &params);
2133 else
2134 return bnx2x_vfpf_config_rss(bp, &params);
619c5cb6
VZ
2135}
2136
1191cb83 2137static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2138{
3b603066 2139 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2140
2141 /* Prepare parameters for function state transitions */
2142 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2143
2144 func_params.f_obj = &bp->func_obj;
2145 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2146
2147 func_params.params.hw_init.load_phase = load_code;
2148
2149 return bnx2x_func_state_change(bp, &func_params);
2150}
2151
2152/*
2153 * Cleans the objects that have internal lists without sending
16a5fd92 2154 * ramrods. Should be run when interrupts are disabled.
619c5cb6 2155 */
7fa6f340 2156void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2157{
2158 int rc;
2159 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2160 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2161 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2162
2163 /***************** Cleanup MACs' object first *************************/
2164
2165 /* Wait for completion of the requested commands */
2166 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2167 /* Perform a dry cleanup */
2168 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2169
2170 /* Clean ETH primary MAC */
2171 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2172 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2173 &ramrod_flags);
2174 if (rc != 0)
2175 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2176
2177 /* Cleanup UC list */
2178 vlan_mac_flags = 0;
2179 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2180 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2181 &ramrod_flags);
2182 if (rc != 0)
2183 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2184
2185 /***************** Now clean mcast object *****************************/
2186 rparam.mcast_obj = &bp->mcast_obj;
2187 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2188
8b09be5f
YM
2189 /* Add a DEL command... - Since we're doing a driver cleanup only,
2190 * we take a lock surrounding both the initial send and the CONTs,
2191 * as we don't want a true completion to disrupt us in the middle.
2192 */
2193 netif_addr_lock_bh(bp->dev);
619c5cb6
VZ
2194 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2195 if (rc < 0)
51c1a580
MS
2196 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2197 rc);
619c5cb6
VZ
2198
2199 /* ...and wait until all pending commands are cleared */
2200 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2201 while (rc != 0) {
2202 if (rc < 0) {
2203 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2204 rc);
8b09be5f 2205 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2206 return;
2207 }
2208
2209 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2210 }
8b09be5f 2211 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2212}
2213
2214#ifndef BNX2X_STOP_ON_ERROR
2215#define LOAD_ERROR_EXIT(bp, label) \
2216 do { \
2217 (bp)->state = BNX2X_STATE_ERROR; \
2218 goto label; \
2219 } while (0)
55c11941
MS
2220
2221#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2222 do { \
2223 bp->cnic_loaded = false; \
2224 goto label; \
2225 } while (0)
2226#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2227#define LOAD_ERROR_EXIT(bp, label) \
2228 do { \
2229 (bp)->state = BNX2X_STATE_ERROR; \
2230 (bp)->panic = 1; \
2231 return -EBUSY; \
2232 } while (0)
55c11941
MS
2233#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2234 do { \
2235 bp->cnic_loaded = false; \
2236 (bp)->panic = 1; \
2237 return -EBUSY; \
2238 } while (0)
2239#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 2240
ad5afc89
AE
2241static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2242{
2243 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2244 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2245 return;
2246}
2247
2248static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2249{
8db573ba 2250 int num_groups, vf_headroom = 0;
ad5afc89 2251 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2252
ad5afc89
AE
2253 /* number of queues for statistics is number of eth queues + FCoE */
2254 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2255
ad5afc89
AE
2256 /* Total number of FW statistics requests =
2257 * 1 for port stats + 1 for PF stats + potentially 2 for FCoE (fcoe proper
2258 * and fcoe l2 queue) stats + num of queues (which includes another 1
2259 * for fcoe l2 queue if applicable)
2260 */
2261 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2262
8db573ba
AE
2263 /* vf stats appear in the request list, but their data is allocated by
2264 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2265 * it is used to determine where to place the vf stats queries in the
2266 * request struct
2267 */
2268 if (IS_SRIOV(bp))
6411280a 2269 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2270
ad5afc89
AE
2271 /* Request is built from stats_query_header and an array of
2272 * stats_query_cmd_group each of which contains
2273 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2274 * configured in the stats_query_header.
2275 */
2276 num_groups =
8db573ba
AE
2277 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2278 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2279 1 : 0));
2280
8db573ba
AE
2281 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2282 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2283 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2284 num_groups * sizeof(struct stats_query_cmd_group);
2285
2286 /* Data for statistics requests + stats_counter
2287 * stats_counter holds per-STORM counters that are incremented
2288 * when STORM has finished with the current request.
2289 * memory for FCoE offloaded statistics is counted anyway,
2290 * even if they will not be sent.
2291 * VF stats are not accounted for here as the data of VF stats is stored
2292 * in memory allocated by the VF, not here.
2293 */
2294 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2295 sizeof(struct per_pf_stats) +
2296 sizeof(struct fcoe_statistics_params) +
2297 sizeof(struct per_queue_stats) * num_queue_stats +
2298 sizeof(struct stats_counter);
2299
cd2b0389
JP
2300 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2301 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2302 if (!bp->fw_stats)
2303 goto alloc_mem_err;
ad5afc89
AE
2304
2305 /* Set shortcuts */
2306 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2307 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2308 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2309 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2310 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2311 bp->fw_stats_req_sz;
2312
6bf07b8e 2313 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
ad5afc89
AE
2314 U64_HI(bp->fw_stats_req_mapping),
2315 U64_LO(bp->fw_stats_req_mapping));
6bf07b8e 2316 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
ad5afc89
AE
2317 U64_HI(bp->fw_stats_data_mapping),
2318 U64_LO(bp->fw_stats_data_mapping));
2319 return 0;
2320
2321alloc_mem_err:
2322 bnx2x_free_fw_stats_mem(bp);
2323 BNX2X_ERR("Can't allocate FW stats memory\n");
2324 return -ENOMEM;
2325}
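/* Editor's note (illustrative, hypothetical numbers): the num_groups
 * expression above is a ceiling division. If STATS_QUERY_CMD_COUNT were
 * 16 and fw_stats_num + vf_headroom = 18, num_groups = 18/16 + 1 = 2,
 * and fw_stats_req_sz covers the header plus two stats_query_cmd_group
 * entries.
 */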
2326
2327/* send load request to mcp and analyze response */
2328static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2329{
178135c1
DK
2330 u32 param;
2331
ad5afc89
AE
2332 /* init fw_seq */
2333 bp->fw_seq =
2334 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2335 DRV_MSG_SEQ_NUMBER_MASK);
2336 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2337
2338 /* Get current FW pulse sequence */
2339 bp->fw_drv_pulse_wr_seq =
2340 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2341 DRV_PULSE_SEQ_MASK);
2342 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2343
178135c1
DK
2344 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2345
2346 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2347 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2348
ad5afc89 2349 /* load request */
178135c1 2350 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
ad5afc89
AE
2351
2352 /* if mcp fails to respond we must abort */
2353 if (!(*load_code)) {
2354 BNX2X_ERR("MCP response failure, aborting\n");
2355 return -EBUSY;
2356 }
2357
2358 /* If mcp refused (e.g. other port is in diagnostic mode) we
2359 * must abort
2360 */
2361 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2362 BNX2X_ERR("MCP refused load request, aborting\n");
2363 return -EBUSY;
2364 }
2365 return 0;
2366}
2367
2368/* check whether another PF has already loaded FW to the chip. In
2369 * virtualized environments a pf from another VM may have already
2370 * initialized the device including loading FW
2371 */
91ebb929 2372int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
ad5afc89
AE
2373{
2374 /* is another pf loaded on this engine? */
2375 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2376 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2377 /* build my FW version dword */
2378 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2379 (BCM_5710_FW_MINOR_VERSION << 8) +
2380 (BCM_5710_FW_REVISION_VERSION << 16) +
2381 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2382
2383 /* read loaded FW from chip */
2384 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2385
2386 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2387 loaded_fw, my_fw);
2388
2389 /* abort nic load if version mismatch */
2390 if (my_fw != loaded_fw) {
91ebb929
YM
2391 if (print_err)
2392 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2393 loaded_fw, my_fw);
2394 else
2395 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2396 loaded_fw, my_fw);
ad5afc89
AE
2397 return -EBUSY;
2398 }
2399 }
2400 return 0;
2401}
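/* Editor's note (illustrative, hypothetical version): for firmware
 * 7.13.1.0 the dword built above is 7 + (13 << 8) + (1 << 16) + (0 << 24)
 * = 0x00010d07; the load is aborted when this differs from the value
 * another PF already programmed into XSEM_REG_PRAM.
 */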
2402
2403/* returns the "mcp load_code" according to global load_count array */
2404static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2405{
2406 int path = BP_PATH(bp);
2407
2408 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
a8f47eb7 2409 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2410 bnx2x_load_count[path][2]);
2411 bnx2x_load_count[path][0]++;
2412 bnx2x_load_count[path][1 + port]++;
ad5afc89 2413 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
a8f47eb7 2414 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2415 bnx2x_load_count[path][2]);
2416 if (bnx2x_load_count[path][0] == 1)
ad5afc89 2417 return FW_MSG_CODE_DRV_LOAD_COMMON;
a8f47eb7 2418 else if (bnx2x_load_count[path][1 + port] == 1)
ad5afc89
AE
2419 return FW_MSG_CODE_DRV_LOAD_PORT;
2420 else
2421 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2422}
2423
2424/* mark PMF if applicable */
2425static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2426{
2427 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2428 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2429 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2430 bp->port.pmf = 1;
2431 /* We need the barrier to ensure the ordering between the
2432 * writing to bp->port.pmf here and reading it from the
2433 * bnx2x_periodic_task().
2434 */
2435 smp_mb();
2436 } else {
2437 bp->port.pmf = 0;
452427b0
YM
2438 }
2439
ad5afc89
AE
2440 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2441}
2442
2443static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2444{
2445 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2446 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2447 (bp->common.shmem2_base)) {
2448 if (SHMEM2_HAS(bp, dcc_support))
2449 SHMEM2_WR(bp, dcc_support,
2450 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2451 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2452 if (SHMEM2_HAS(bp, afex_driver_support))
2453 SHMEM2_WR(bp, afex_driver_support,
2454 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2455 }
2456
2457 /* Set AFEX default VLAN tag to an invalid value */
2458 bp->afex_def_vlan_tag = -1;
452427b0
YM
2459}
2460
1191cb83
ED
2461/**
2462 * bnx2x_bz_fp - zero content of the fastpath structure.
2463 *
2464 * @bp: driver handle
2465 * @index: fastpath index to be zeroed
2466 *
2467 * Makes sure the contents of the bp->fp[index].napi is kept
2468 * intact.
2469 */
2470static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2471{
2472 struct bnx2x_fastpath *fp = &bp->fp[index];
65565884 2473 int cos;
1191cb83 2474 struct napi_struct orig_napi = fp->napi;
15192a8c 2475 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
d76a6111 2476
1191cb83 2477 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2478 if (fp->tpa_info)
2479 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2480 sizeof(struct bnx2x_agg_info));
2481 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2482
2483 /* Restore the NAPI object as it has been already initialized */
2484 fp->napi = orig_napi;
15192a8c 2485 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2486 fp->bp = bp;
2487 fp->index = index;
2488 if (IS_ETH_FP(fp))
2489 fp->max_cos = bp->max_cos;
2490 else
2491 /* Special queues support only one CoS */
2492 fp->max_cos = 1;
2493
65565884 2494 /* Init txdata pointers */
65565884
MS
2495 if (IS_FCOE_FP(fp))
2496 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2497 if (IS_ETH_FP(fp))
2498 for_each_cos_in_tx_queue(fp, cos)
2499 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2500 BNX2X_NUM_ETH_QUEUES(bp) + index];
2501
16a5fd92 2502 /* set the tpa flag for each queue. The tpa flag determines the queue
1191cb83
ED
2503 * minimal size so it must be set prior to queue memory allocation
2504 */
f8dcb5e3 2505 if (bp->dev->features & NETIF_F_LRO)
1191cb83 2506 fp->mode = TPA_MODE_LRO;
f8dcb5e3 2507 else if (bp->dev->features & NETIF_F_GRO &&
7e6b4d44 2508 bnx2x_mtu_allows_gro(bp->dev->mtu))
1191cb83 2509 fp->mode = TPA_MODE_GRO;
7e6b4d44
MS
2510 else
2511 fp->mode = TPA_MODE_DISABLED;
1191cb83 2512
22a8f237
MS
2513 /* We don't want TPA if it's disabled in bp
2514 * or if this is an FCoE L2 ring.
2515 */
2516 if (bp->disable_tpa || IS_FCOE_FP(fp))
7e6b4d44 2517 fp->mode = TPA_MODE_DISABLED;
55c11941
MS
2518}
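/* Editor's note (illustrative, assuming 8 ETH queues and max_cos = 3):
 * fp[2] above ends up with txdata_ptr[0] = &bnx2x_txq[2],
 * txdata_ptr[1] = &bnx2x_txq[10] and txdata_ptr[2] = &bnx2x_txq[18],
 * i.e. the per-CoS Tx rings of one queue sit BNX2X_NUM_ETH_QUEUES apart.
 */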
2519
230d00eb
YM
2520void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2521{
2522 u32 cur;
2523
2524 if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2525 return;
2526
2527 cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2528 DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2529 cur, state);
2530
2531 SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2532}
2533
55c11941
MS
2534int bnx2x_load_cnic(struct bnx2x *bp)
2535{
2536 int i, rc, port = BP_PORT(bp);
2537
2538 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2539
2540 mutex_init(&bp->cnic_mutex);
2541
ad5afc89
AE
2542 if (IS_PF(bp)) {
2543 rc = bnx2x_alloc_mem_cnic(bp);
2544 if (rc) {
2545 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2546 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2547 }
55c11941
MS
2548 }
2549
2550 rc = bnx2x_alloc_fp_mem_cnic(bp);
2551 if (rc) {
2552 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2553 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2554 }
2555
2556 /* Update the number of queues with the cnic queues */
2557 rc = bnx2x_set_real_num_queues(bp, 1);
2558 if (rc) {
2559 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2560 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2561 }
2562
2563 /* Add all CNIC NAPI objects */
2564 bnx2x_add_all_napi_cnic(bp);
2565 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2566 bnx2x_napi_enable_cnic(bp);
2567
2568 rc = bnx2x_init_hw_func_cnic(bp);
2569 if (rc)
2570 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2571
2572 bnx2x_nic_init_cnic(bp);
2573
ad5afc89
AE
2574 if (IS_PF(bp)) {
2575 /* Enable Timer scan */
2576 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2577
2578 /* setup cnic queues */
2579 for_each_cnic_queue(bp, i) {
2580 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2581 if (rc) {
2582 BNX2X_ERR("Queue setup failed\n");
2583 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2584 }
55c11941
MS
2585 }
2586 }
2587
2588 /* Initialize Rx filter. */
8b09be5f 2589 bnx2x_set_rx_mode_inner(bp);
55c11941
MS
2590
2591 /* re-read iscsi info */
2592 bnx2x_get_iscsi_info(bp);
2593 bnx2x_setup_cnic_irq_info(bp);
2594 bnx2x_setup_cnic_info(bp);
2595 bp->cnic_loaded = true;
2596 if (bp->state == BNX2X_STATE_OPEN)
2597 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2598
55c11941
MS
2599 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2600
2601 return 0;
2602
2603#ifndef BNX2X_STOP_ON_ERROR
2604load_error_cnic2:
2605 /* Disable Timer scan */
2606 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2607
2608load_error_cnic1:
2609 bnx2x_napi_disable_cnic(bp);
2610 /* Update the number of queues without the cnic queues */
d9d81862 2611 if (bnx2x_set_real_num_queues(bp, 0))
55c11941
MS
2612 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2613load_error_cnic0:
2614 BNX2X_ERR("CNIC-related load failed\n");
2615 bnx2x_free_fp_mem_cnic(bp);
2616 bnx2x_free_mem_cnic(bp);
2617 return rc;
2618#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2619}
2620
9f6c9258
DK
2621/* must be called with rtnl_lock */
2622int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2623{
619c5cb6 2624 int port = BP_PORT(bp);
ad5afc89 2625 int i, rc = 0, load_code = 0;
9f6c9258 2626
55c11941
MS
2627 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2628 DP(NETIF_MSG_IFUP,
2629 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2630
9f6c9258 2631#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2632 if (unlikely(bp->panic)) {
2633 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2634 return -EPERM;
51c1a580 2635 }
9f6c9258
DK
2636#endif
2637
2638 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2639
16a5fd92 2640 /* zero the structure w/o any lock, before SP handler is initialized */
2ae17f66
VZ
2641 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2642 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2643 &bp->last_reported_link.link_report_flags);
2ae17f66 2644
ad5afc89
AE
2645 if (IS_PF(bp))
2646 /* must be called before memory allocation and HW init */
2647 bnx2x_ilt_set_info(bp);
523224a3 2648
6383c0b3
AE
2649 /*
2650 * Zero fastpath structures preserving invariants like napi, which are
2651 * allocated only once, fp index, max_cos, bp pointer.
7e6b4d44 2652 * Also set fp->mode and txdata_ptr.
b3b83c3f 2653 */
51c1a580 2654 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2655 for_each_queue(bp, i)
2656 bnx2x_bz_fp(bp, i);
55c11941
MS
2657 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2658 bp->num_cnic_queues) *
2659 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2660
55c11941 2661 bp->fcoe_init = false;
6383c0b3 2662
a8c94b91
VZ
2663 /* Set the receive queues buffer size */
2664 bnx2x_set_rx_buf_size(bp);
2665
ad5afc89
AE
2666 if (IS_PF(bp)) {
2667 rc = bnx2x_alloc_mem(bp);
2668 if (rc) {
2669 BNX2X_ERR("Unable to allocate bp memory\n");
2670 return rc;
2671 }
2672 }
2673
ad5afc89
AE
2674 /* need to be done after alloc mem, since it's self adjusting to amount
2675 * of memory available for RSS queues
2676 */
2677 rc = bnx2x_alloc_fp_mem(bp);
2678 if (rc) {
2679 BNX2X_ERR("Unable to allocate memory for fps\n");
2680 LOAD_ERROR_EXIT(bp, load_error0);
2681 }
d6214d7a 2682
e3ed4eae
DK
2683 /* Allocate memory for FW statistics */
2684 if (bnx2x_alloc_fw_stats_mem(bp))
2685 LOAD_ERROR_EXIT(bp, load_error0);
2686
8d9ac297
AE
2687 /* request pf to initialize status blocks */
2688 if (IS_VF(bp)) {
2689 rc = bnx2x_vfpf_init(bp);
2690 if (rc)
2691 LOAD_ERROR_EXIT(bp, load_error0);
2692 }
2693
b3b83c3f
DK
2694 /* As long as bnx2x_alloc_mem() may possibly update
2695 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2696 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2697 */
55c11941 2698 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2699 if (rc) {
ec6ba945 2700 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2701 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2702 }
2703
6383c0b3 2704 /* configure multi cos mappings in kernel.
16a5fd92
YM
2705 * this configuration may be overridden by a multi class queue
2706 * discipline or by a dcbx negotiation result.
6383c0b3
AE
2707 */
2708 bnx2x_setup_tc(bp->dev, bp->max_cos);
2709
26614ba5
MS
2710 /* Add all NAPI objects */
2711 bnx2x_add_all_napi(bp);
55c11941 2712 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2713 bnx2x_napi_enable(bp);
2714
ad5afc89
AE
2715 if (IS_PF(bp)) {
2716 /* set pf load just before approaching the MCP */
2717 bnx2x_set_pf_load(bp);
2718
2719 /* if mcp exists send load request and analyze response */
2720 if (!BP_NOMCP(bp)) {
2721 /* attempt to load pf */
2722 rc = bnx2x_nic_load_request(bp, &load_code);
2723 if (rc)
2724 LOAD_ERROR_EXIT(bp, load_error1);
2725
2726 /* what did mcp say? */
91ebb929 2727 rc = bnx2x_compare_fw_ver(bp, load_code, true);
ad5afc89
AE
2728 if (rc) {
2729 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2730 LOAD_ERROR_EXIT(bp, load_error2);
2731 }
ad5afc89
AE
2732 } else {
2733 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2734 }
9f6c9258 2735
ad5afc89
AE
2736 /* mark pmf if applicable */
2737 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2738
ad5afc89
AE
2739 /* Init Function state controlling object */
2740 bnx2x__init_func_obj(bp);
6383c0b3 2741
ad5afc89
AE
2742 /* Initialize HW */
2743 rc = bnx2x_init_hw(bp, load_code);
2744 if (rc) {
2745 BNX2X_ERR("HW init failed, aborting\n");
2746 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2747 LOAD_ERROR_EXIT(bp, load_error2);
2748 }
9f6c9258
DK
2749 }
2750
ecf01c22
YM
2751 bnx2x_pre_irq_nic_init(bp);
2752
d6214d7a
DK
2753 /* Connect to IRQs */
2754 rc = bnx2x_setup_irqs(bp);
523224a3 2755 if (rc) {
ad5afc89
AE
2756 BNX2X_ERR("setup irqs failed\n");
2757 if (IS_PF(bp))
2758 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2759 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2760 }
2761
619c5cb6 2762 /* Init per-function objects */
ad5afc89 2763 if (IS_PF(bp)) {
ecf01c22
YM
2764 /* Setup NIC internals and enable interrupts */
2765 bnx2x_post_irq_nic_init(bp, load_code);
2766
ad5afc89 2767 bnx2x_init_bp_objs(bp);
b56e9670 2768 bnx2x_iov_nic_init(bp);
a3348722 2769
ad5afc89
AE
2770 /* Set AFEX default VLAN tag to an invalid value */
2771 bp->afex_def_vlan_tag = -1;
2772 bnx2x_nic_load_afex_dcc(bp, load_code);
2773 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2774 rc = bnx2x_func_start(bp);
2775 if (rc) {
2776 BNX2X_ERR("Function start failed!\n");
2777 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2778
619c5cb6 2779 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2780 }
9f6c9258 2781
ad5afc89
AE
2782 /* Send LOAD_DONE command to MCP */
2783 if (!BP_NOMCP(bp)) {
2784 load_code = bnx2x_fw_command(bp,
2785 DRV_MSG_CODE_LOAD_DONE, 0);
2786 if (!load_code) {
2787 BNX2X_ERR("MCP response failure, aborting\n");
2788 rc = -EBUSY;
2789 LOAD_ERROR_EXIT(bp, load_error3);
2790 }
2791 }
9f6c9258 2792
0c14e5ce
AE
2793 /* initialize FW coalescing state machines in RAM */
2794 bnx2x_update_coalesce(bp);
60cad4e6 2795 }
0c14e5ce 2796
60cad4e6
AE
2797 /* setup the leading queue */
2798 rc = bnx2x_setup_leading(bp);
2799 if (rc) {
2800 BNX2X_ERR("Setup leading failed!\n");
2801 LOAD_ERROR_EXIT(bp, load_error3);
2802 }
ad5afc89 2803
60cad4e6
AE
2804 /* set up the rest of the queues */
2805 for_each_nondefault_eth_queue(bp, i) {
2806 if (IS_PF(bp))
2807 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2808 else /* VF */
2809 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
ad5afc89 2810 if (rc) {
60cad4e6 2811 BNX2X_ERR("Queue %d setup failed\n", i);
ad5afc89
AE
2812 LOAD_ERROR_EXIT(bp, load_error3);
2813 }
60cad4e6 2814 }
8d9ac297 2815
60cad4e6
AE
2816 /* setup rss */
2817 rc = bnx2x_init_rss(bp);
2818 if (rc) {
2819 BNX2X_ERR("PF RSS init failed\n");
2820 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2821 }
619c5cb6 2822
523224a3
DK
2823 /* Now when Clients are configured we are ready to work */
2824 bp->state = BNX2X_STATE_OPEN;
2825
619c5cb6 2826 /* Configure a ucast MAC */
ad5afc89
AE
2827 if (IS_PF(bp))
2828 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297 2829 else /* vf */
f8f4f61a
DK
2830 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2831 true);
51c1a580
MS
2832 if (rc) {
2833 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2834 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2835 }
6e30dd4e 2836
ad5afc89 2837 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2838 bnx2x_update_max_mf_config(bp, bp->pending_max);
2839 bp->pending_max = 0;
2840 }
2841
ad5afc89
AE
2842 if (bp->port.pmf) {
2843 rc = bnx2x_initial_phy_init(bp, load_mode);
2844 if (rc)
2845 LOAD_ERROR_EXIT(bp, load_error3);
2846 }
c63da990 2847 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2848
619c5cb6
VZ
2849 /* Start fast path */
2850
2851 /* Initialize Rx filter. */
8b09be5f 2852 bnx2x_set_rx_mode_inner(bp);
6e30dd4e 2853
eeed018c
MK
2854 if (bp->flags & PTP_SUPPORTED) {
2855 bnx2x_init_ptp(bp);
2856 bnx2x_configure_ptp_filters(bp);
2857 }
2858 /* Start Tx */
9f6c9258
DK
2859 switch (load_mode) {
2860 case LOAD_NORMAL:
16a5fd92 2861 /* Tx queue should be only re-enabled */
523224a3 2862 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2863 break;
2864
2865 case LOAD_OPEN:
2866 netif_tx_start_all_queues(bp->dev);
4e857c58 2867 smp_mb__after_atomic();
9f6c9258
DK
2868 break;
2869
2870 case LOAD_DIAG:
8970b2e4 2871 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2872 bp->state = BNX2X_STATE_DIAG;
2873 break;
2874
2875 default:
2876 break;
2877 }
2878
00253a8c 2879 if (bp->port.pmf)
4c704899 2880 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2881 else
9f6c9258
DK
2882 bnx2x__link_status_update(bp);
2883
2884 /* start the timer */
2885 mod_timer(&bp->timer, jiffies + bp->current_interval);
2886
55c11941
MS
2887 if (CNIC_ENABLED(bp))
2888 bnx2x_load_cnic(bp);
9f6c9258 2889
42f8277f
YM
2890 if (IS_PF(bp))
2891 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2892
ad5afc89
AE
2893 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2894 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2895 u32 val;
2896 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
230d00eb
YM
2897 val &= ~DRV_FLAGS_MTU_MASK;
2898 val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
9ce392d4
YM
2899 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2900 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2901 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2902 }
2903
619c5cb6 2904 /* Wait for all pending SP commands to complete */
ad5afc89 2905 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2906 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2907 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2908 return -EBUSY;
2909 }
6891dd25 2910
9876879f
BW
2911 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2912 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2913 bnx2x_dcbx_init(bp, false);
2914
230d00eb
YM
2915 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2916 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2917
55c11941
MS
2918 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2919
9f6c9258
DK
2920 return 0;
2921
619c5cb6 2922#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2923load_error3:
ad5afc89
AE
2924 if (IS_PF(bp)) {
2925 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2926
ad5afc89
AE
2927 /* Clean queueable objects */
2928 bnx2x_squeeze_objects(bp);
2929 }
619c5cb6 2930
9f6c9258
DK
2931 /* Free SKBs, SGEs, TPA pool and driver internals */
2932 bnx2x_free_skbs(bp);
ec6ba945 2933 for_each_rx_queue(bp, i)
9f6c9258 2934 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2935
9f6c9258 2936 /* Release IRQs */
d6214d7a
DK
2937 bnx2x_free_irq(bp);
2938load_error2:
ad5afc89 2939 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2940 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2941 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2942 }
2943
2944 bp->port.pmf = 0;
9f6c9258
DK
2945load_error1:
2946 bnx2x_napi_disable(bp);
722c6f58 2947 bnx2x_del_all_napi(bp);
ad5afc89 2948
889b9af3 2949 /* clear pf_load status, as it was already set */
ad5afc89
AE
2950 if (IS_PF(bp))
2951 bnx2x_clear_pf_load(bp);
d6214d7a 2952load_error0:
ad5afc89 2953 bnx2x_free_fw_stats_mem(bp);
e3ed4eae 2954 bnx2x_free_fp_mem(bp);
9f6c9258
DK
2955 bnx2x_free_mem(bp);
2956
2957 return rc;
619c5cb6 2958#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2959}
2960
7fa6f340 2961int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2962{
2963 u8 rc = 0, cos, i;
2964
2965 /* Wait until tx fastpath tasks complete */
2966 for_each_tx_queue(bp, i) {
2967 struct bnx2x_fastpath *fp = &bp->fp[i];
2968
2969 for_each_cos_in_tx_queue(fp, cos)
2970 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2971 if (rc)
2972 return rc;
2973 }
2974 return 0;
2975}
2976
9f6c9258 2977/* must be called with rtnl_lock */
5d07d868 2978int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2979{
2980 int i;
c9ee9206
VZ
2981 bool global = false;
2982
55c11941
MS
2983 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2984
230d00eb
YM
2985 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2986 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2987
9ce392d4 2988 /* mark driver is unloaded in shmem2 */
ad5afc89 2989 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2990 u32 val;
2991 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2992 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2993 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2994 }
2995
80bfe5cc 2996 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2997 (bp->state == BNX2X_STATE_CLOSED ||
2998 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2999 /* We can get here if the driver has been unloaded
3000 * during parity error recovery and is either waiting for a
3001 * leader to complete or for other functions to unload and
3002 * then ifdown has been issued. In this case we want to
3003 * unload and let other functions to complete a recovery
3004 * process.
3005 */
9f6c9258
DK
3006 bp->recovery_state = BNX2X_RECOVERY_DONE;
3007 bp->is_leader = 0;
c9ee9206
VZ
3008 bnx2x_release_leader_lock(bp);
3009 smp_mb();
3010
51c1a580
MS
3011 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3012 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
3013 return -EINVAL;
3014 }
3015
80bfe5cc 3016 /* Nothing to do during unload if previous bnx2x_nic_load()
16a5fd92 3017 * has not completed successfully - all resources are released.
80bfe5cc
YM
3018 *
3019 * we can get here only after unsuccessful ndo_* callback, during which
3020 * dev->IFF_UP flag is still on.
3021 */
3022 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3023 return 0;
3024
3025 /* It's important to set the bp->state to the value different from
87b7ba3d
VZ
3026 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3027 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3028 */
3029 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3030 smp_mb();
3031
78c3bcc5
AE
3032 /* indicate to VFs that the PF is going down */
3033 bnx2x_iov_channel_down(bp);
3034
55c11941
MS
3035 if (CNIC_LOADED(bp))
3036 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3037
9505ee37
VZ
3038 /* Stop Tx */
3039 bnx2x_tx_disable(bp);
65565884 3040 netdev_reset_tc(bp->dev);
9505ee37 3041
9f6c9258 3042 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 3043
9f6c9258 3044 del_timer_sync(&bp->timer);
f85582f8 3045
ad5afc89
AE
3046 if (IS_PF(bp)) {
3047 /* Set ALWAYS_ALIVE bit in shmem */
3048 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3049 bnx2x_drv_pulse(bp);
3050 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3051 bnx2x_save_statistics(bp);
3052 }
9f6c9258 3053
ad5afc89
AE
3054 /* wait until consumers catch up with producers in all queues */
3055 bnx2x_drain_tx_queues(bp);
9f6c9258 3056
9b176b6b
AE
3057 /* if VF indicate to PF this function is going down (PF will delete sp
3058 * elements and clear initializations
3059 */
3060 if (IS_VF(bp))
3061 bnx2x_vfpf_close_vf(bp);
3062 else if (unload_mode != UNLOAD_RECOVERY)
3063 /* if this is a normal/close unload need to clean up chip*/
5d07d868 3064 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 3065 else {
c9ee9206
VZ
3066 /* Send the UNLOAD_REQUEST to the MCP */
3067 bnx2x_send_unload_req(bp, unload_mode);
3068
16a5fd92 3069 /* Prevent transactions to host from the functions on the
c9ee9206 3070 * engine that doesn't reset global blocks in case of global
16a5fd92 3071 * attention once global blocks are reset and gates are opened
c9ee9206
VZ
3072 * (the engine whose leader will perform the recovery
3073 * last).
3074 */
3075 if (!CHIP_IS_E1x(bp))
3076 bnx2x_pf_disable(bp);
3077
3078 /* Disable HW interrupts, NAPI */
523224a3 3079 bnx2x_netif_stop(bp, 1);
26614ba5
MS
3080 /* Delete all NAPI objects */
3081 bnx2x_del_all_napi(bp);
55c11941
MS
3082 if (CNIC_LOADED(bp))
3083 bnx2x_del_all_napi_cnic(bp);
523224a3 3084 /* Release IRQs */
d6214d7a 3085 bnx2x_free_irq(bp);
c9ee9206
VZ
3086
3087 /* Report UNLOAD_DONE to MCP */
5d07d868 3088 bnx2x_send_unload_done(bp, false);
523224a3 3089 }
9f6c9258 3090
619c5cb6 3091 /*
16a5fd92 3092 * At this stage no more interrupts will arrive so we may safely clean
619c5cb6
VZ
3093 * the queueable objects here in case they failed to get cleaned so far.
3094 */
ad5afc89
AE
3095 if (IS_PF(bp))
3096 bnx2x_squeeze_objects(bp);
619c5cb6 3097
79616895
VZ
3098 /* There should be no more pending SP commands at this stage */
3099 bp->sp_state = 0;
3100
9f6c9258
DK
3101 bp->port.pmf = 0;
3102
a0d307b2
DK
3103 /* clear pending work in rtnl task */
3104 bp->sp_rtnl_state = 0;
3105 smp_mb();
3106
9f6c9258
DK
3107 /* Free SKBs, SGEs, TPA pool and driver internals */
3108 bnx2x_free_skbs(bp);
55c11941
MS
3109 if (CNIC_LOADED(bp))
3110 bnx2x_free_skbs_cnic(bp);
ec6ba945 3111 for_each_rx_queue(bp, i)
9f6c9258 3112 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 3113
ad5afc89
AE
3114 bnx2x_free_fp_mem(bp);
3115 if (CNIC_LOADED(bp))
55c11941 3116 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 3117
ad5afc89 3118 if (IS_PF(bp)) {
ad5afc89
AE
3119 if (CNIC_LOADED(bp))
3120 bnx2x_free_mem_cnic(bp);
3121 }
b4cddbd6
AE
3122 bnx2x_free_mem(bp);
3123
9f6c9258 3124 bp->state = BNX2X_STATE_CLOSED;
55c11941 3125 bp->cnic_loaded = false;
9f6c9258 3126
42f8277f
YM
3127 /* Clear driver version indication in shmem */
3128 if (IS_PF(bp))
3129 bnx2x_update_mng_version(bp);
3130
c9ee9206
VZ
3131 /* Check if there are pending parity attentions. If there are - set
3132 * RECOVERY_IN_PROGRESS.
3133 */
ad5afc89 3134 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
3135 bnx2x_set_reset_in_progress(bp);
3136
3137 /* Set RESET_IS_GLOBAL if needed */
3138 if (global)
3139 bnx2x_set_reset_global(bp);
3140 }
3141
9f6c9258
DK
3142 /* The last driver must disable a "close the gate" if there is no
3143 * parity attention or "process kill" pending.
3144 */
ad5afc89
AE
3145 if (IS_PF(bp) &&
3146 !bnx2x_clear_pf_load(bp) &&
3147 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
3148 bnx2x_disable_close_the_gate(bp);
3149
55c11941
MS
3150 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3151
9f6c9258
DK
3152 return 0;
3153}
f85582f8 3154
9f6c9258
DK
3155int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3156{
3157 u16 pmcsr;
3158
adf5f6a1 3159 /* If there is no power capability, silently succeed */
29ed74c3 3160 if (!bp->pdev->pm_cap) {
51c1a580 3161 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
3162 return 0;
3163 }
3164
29ed74c3 3165 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
9f6c9258
DK
3166
3167 switch (state) {
3168 case PCI_D0:
29ed74c3 3169 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3170 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3171 PCI_PM_CTRL_PME_STATUS));
3172
3173 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3174 /* delay required during transition out of D3hot */
3175 msleep(20);
3176 break;
3177
3178 case PCI_D3hot:
3179 /* If there are other clients above, don't
3180 shut down the power */
3181 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3182 return 0;
3183 /* Don't shut down the power for emulation and FPGA */
3184 if (CHIP_REV_IS_SLOW(bp))
3185 return 0;
3186
3187 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3188 pmcsr |= 3;
3189
3190 if (bp->wol)
3191 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3192
29ed74c3 3193 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3194 pmcsr);
3195
3196 /* No more memory access after this point until
3197 * device is brought back to D0.
3198 */
3199 break;
3200
3201 default:
51c1a580 3202 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3203 return -EINVAL;
3204 }
3205 return 0;
3206}
3207
9f6c9258
DK
3208/*
3209 * net_device service functions
3210 */
a8f47eb7 3211static int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3212{
3213 int work_done = 0;
6383c0b3 3214 u8 cos;
9f6c9258
DK
3215 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3216 napi);
3217 struct bnx2x *bp = fp->bp;
3218
3219 while (1) {
3220#ifdef BNX2X_STOP_ON_ERROR
3221 if (unlikely(bp->panic)) {
3222 napi_complete(napi);
3223 return 0;
3224 }
3225#endif
8f20aa57 3226 if (!bnx2x_fp_lock_napi(fp))
24e579c8 3227 return budget;
9f6c9258 3228
6383c0b3 3229 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3230 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3231 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3232
9f6c9258
DK
3233 if (bnx2x_has_rx_work(fp)) {
3234 work_done += bnx2x_rx_int(fp, budget - work_done);
3235
3236 /* must not complete if we consumed full budget */
8f20aa57
DK
3237 if (work_done >= budget) {
3238 bnx2x_fp_unlock_napi(fp);
9f6c9258 3239 break;
8f20aa57 3240 }
9f6c9258
DK
3241 }
3242
074975d0
ED
3243 bnx2x_fp_unlock_napi(fp);
3244
9f6c9258 3245 /* Fall out from the NAPI loop if needed */
074975d0 3246 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3247
ec6ba945
VZ
3248 /* No need to update SB for FCoE L2 ring as long as
3249 * it's connected to the default SB and the SB
3250 * has been updated when NAPI was scheduled.
3251 */
3252 if (IS_FCOE_FP(fp)) {
3253 napi_complete(napi);
3254 break;
3255 }
9f6c9258 3256 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3257 /* bnx2x_has_rx_work() reads the status block,
3258 * thus we need to ensure that status block indices
3259 * have been actually read (bnx2x_update_fpsb_idx)
3260 * prior to this check (bnx2x_has_rx_work) so that
3261 * we won't write the "newer" value of the status block
3262 * to IGU (if there was a DMA right after
3263 * bnx2x_has_rx_work and if there is no rmb, the memory
3264 * reading (bnx2x_update_fpsb_idx) may be postponed
3265 * to right before bnx2x_ack_sb). In this case there
3266 * will never be another interrupt until there is
3267 * another update of the status block, while there
3268 * is still unhandled work.
3269 */
9f6c9258
DK
3270 rmb();
3271
3272 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3273 napi_complete(napi);
3274 /* Re-enable interrupts */
51c1a580 3275 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3276 "Update index to %d\n", fp->fp_hc_idx);
3277 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3278 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3279 IGU_INT_ENABLE, 1);
3280 break;
3281 }
3282 }
3283 }
3284
3285 return work_done;
3286}
3287
e0d1095a 3288#ifdef CONFIG_NET_RX_BUSY_POLL
8f20aa57
DK
3289/* must be called with local_bh_disable()d */
3290int bnx2x_low_latency_recv(struct napi_struct *napi)
3291{
3292 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3293 napi);
3294 struct bnx2x *bp = fp->bp;
3295 int found = 0;
3296
3297 if ((bp->state == BNX2X_STATE_CLOSED) ||
3298 (bp->state == BNX2X_STATE_ERROR) ||
f8dcb5e3 3299 (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
8f20aa57
DK
3300 return LL_FLUSH_FAILED;
3301
3302 if (!bnx2x_fp_lock_poll(fp))
3303 return LL_FLUSH_BUSY;
3304
75b29459 3305 if (bnx2x_has_rx_work(fp))
8f20aa57 3306 found = bnx2x_rx_int(fp, 4);
8f20aa57
DK
3307
3308 bnx2x_fp_unlock_poll(fp);
3309
3310 return found;
3311}
3312#endif
3313
9f6c9258
DK
3314/* we split the first BD into headers and data BDs
3315 * to ease the pain of our fellow microcode engineers;
3316 * we use one mapping for both BDs
9f6c9258 3317 */
91226790
DK
3318static u16 bnx2x_tx_split(struct bnx2x *bp,
3319 struct bnx2x_fp_txdata *txdata,
3320 struct sw_tx_bd *tx_buf,
3321 struct eth_tx_start_bd **tx_bd, u16 hlen,
3322 u16 bd_prod)
9f6c9258
DK
3323{
3324 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3325 struct eth_tx_bd *d_tx_bd;
3326 dma_addr_t mapping;
3327 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3328
3329 /* first fix first BD */
9f6c9258
DK
3330 h_tx_bd->nbytes = cpu_to_le16(hlen);
3331
91226790
DK
3332 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3333 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3334
3335 /* now get a new data BD
3336 * (after the pbd) and fill it */
3337 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3338 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3339
3340 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3341 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3342
3343 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3344 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3345 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3346
3347 /* this marks the BD as one that has no individual mapping */
3348 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3349
3350 DP(NETIF_MSG_TX_QUEUED,
3351 "TSO split data size is %d (%x:%x)\n",
3352 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3353
3354 /* update tx_bd */
3355 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3356
3357 return bd_prod;
3358}
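/* Editor's note (illustrative, hypothetical sizes): splitting a 66-byte
 * header out of a 200-byte first BD leaves the header BD with nbytes = 66
 * and creates a data BD of 134 bytes at mapping + 66. Both BDs reuse the
 * single DMA mapping, which is why the buffer is flagged
 * BNX2X_TSO_SPLIT_BD and gets no individual unmap of its own.
 */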
3359
86564c3f
YM
3360#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3361#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3362static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3363{
86564c3f
YM
3364 __sum16 tsum = (__force __sum16) csum;
3365
9f6c9258 3366 if (fix > 0)
86564c3f
YM
3367 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3368 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3369
3370 else if (fix < 0)
86564c3f
YM
3371 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3372 csum_partial(t_header, -fix, 0)));
9f6c9258 3373
e2593fcd 3374 return bswab16(tsum);
9f6c9258
DK
3375}
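/* Editor's note (illustrative): 'fix' is the signed distance between the
 * header the stack checksummed from and the one the FW expects. A positive
 * fix folds out the checksum of the 'fix' bytes just before t_header, a
 * negative fix folds in the '-fix' bytes starting at t_header, and the
 * result is byte-swapped for the BD layout.
 */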
3376
91226790 3377static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3378{
3379 u32 rc;
a848ade4
DK
3380 __u8 prot = 0;
3381 __be16 protocol;
9f6c9258
DK
3382
3383 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3384 return XMIT_PLAIN;
9f6c9258 3385
a848ade4
DK
3386 protocol = vlan_get_protocol(skb);
3387 if (protocol == htons(ETH_P_IPV6)) {
3388 rc = XMIT_CSUM_V6;
3389 prot = ipv6_hdr(skb)->nexthdr;
3390 } else {
3391 rc = XMIT_CSUM_V4;
3392 prot = ip_hdr(skb)->protocol;
3393 }
9f6c9258 3394
a848ade4
DK
3395 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3396 if (inner_ip_hdr(skb)->version == 6) {
3397 rc |= XMIT_CSUM_ENC_V6;
3398 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3399 rc |= XMIT_CSUM_TCP;
9f6c9258 3400 } else {
a848ade4
DK
3401 rc |= XMIT_CSUM_ENC_V4;
3402 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3403 rc |= XMIT_CSUM_TCP;
3404 }
3405 }
a848ade4
DK
3406 if (prot == IPPROTO_TCP)
3407 rc |= XMIT_CSUM_TCP;
9f6c9258 3408
36a8f39e
ED
3409 if (skb_is_gso(skb)) {
3410 if (skb_is_gso_v6(skb)) {
3411 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3412 if (rc & XMIT_CSUM_ENC)
3413 rc |= XMIT_GSO_ENC_V6;
3414 } else {
3415 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3416 if (rc & XMIT_CSUM_ENC)
3417 rc |= XMIT_GSO_ENC_V4;
3418 }
a848ade4 3419 }
9f6c9258
DK
3420
3421 return rc;
3422}
3423
3424#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3425/* check if the packet requires linearization (i.e. is too fragmented).
3426 No need to check fragmentation if page size > 8K (there will be no
3427 violation of FW restrictions) */
3428static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3429 u32 xmit_type)
3430{
3431 int to_copy = 0;
3432 int hlen = 0;
3433 int first_bd_sz = 0;
3434
3435 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3436 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3437
3438 if (xmit_type & XMIT_GSO) {
3439 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3440 /* Check if LSO packet needs to be copied:
3441 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3442 int wnd_size = MAX_FETCH_BD - 3;
3443 /* Number of windows to check */
3444 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3445 int wnd_idx = 0;
3446 int frag_idx = 0;
3447 u32 wnd_sum = 0;
3448
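 /* The check below slides a window of wnd_size consecutive BDs over
  * the payload (the headerless linear part followed by the frags);
  * if any window carries less than one full MSS, the skb must be
  * linearized to satisfy the FW.
  */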
3449 /* Headers length */
592b9b8d
YM
3450 if (xmit_type & XMIT_GSO_ENC)
3451 hlen = (int)(skb_inner_transport_header(skb) -
3452 skb->data) +
3453 inner_tcp_hdrlen(skb);
3454 else
3455 hlen = (int)(skb_transport_header(skb) -
3456 skb->data) + tcp_hdrlen(skb);
9f6c9258
DK
3457
3458 /* Amount of data (w/o headers) on linear part of SKB*/
3459 first_bd_sz = skb_headlen(skb) - hlen;
3460
3461 wnd_sum = first_bd_sz;
3462
3463 /* Calculate the first sum - it's special */
3464 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3465 wnd_sum +=
9e903e08 3466 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3467
3468 /* If there was data on linear skb data - check it */
3469 if (first_bd_sz > 0) {
3470 if (unlikely(wnd_sum < lso_mss)) {
3471 to_copy = 1;
3472 goto exit_lbl;
3473 }
3474
3475 wnd_sum -= first_bd_sz;
3476 }
3477
3478 /* Others are easier: run through the frag list and
3479 check all windows */
3480 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3481 wnd_sum +=
9e903e08 3482 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3483
3484 if (unlikely(wnd_sum < lso_mss)) {
3485 to_copy = 1;
3486 break;
3487 }
3488 wnd_sum -=
9e903e08 3489 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3490 }
3491 } else {
3492 /* a non-LSO packet that is too fragmented must always
3493 be linearized */
3494 to_copy = 1;
3495 }
3496 }
3497
3498exit_lbl:
3499 if (unlikely(to_copy))
3500 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3501 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3502 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3503 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3504
3505 return to_copy;
3506}
3507#endif
3508
f2e0899f 3509/**
e8920674 3510 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3511 *
e8920674
DK
3512 * @skb: packet skb
3513 * @pbd: parse BD
3514 * @xmit_type: xmit flags
f2e0899f 3515 */
91226790
DK
3516static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3517 struct eth_tx_parse_bd_e1x *pbd,
3518 u32 xmit_type)
f2e0899f
DK
3519{
3520 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3521 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3522 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3523
3524 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3525 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3526 pbd->tcp_pseudo_csum =
86564c3f
YM
3527 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3528 ip_hdr(skb)->daddr,
3529 0, IPPROTO_TCP, 0));
057cf65e 3530 } else {
f2e0899f 3531 pbd->tcp_pseudo_csum =
86564c3f
YM
3532 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3533 &ipv6_hdr(skb)->daddr,
3534 0, IPPROTO_TCP, 0));
057cf65e 3535 }
f2e0899f 3536
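 /* The pseudo-header checksums above are computed with a zero length,
  * which is what the PSEUDO_CS_WITHOUT_LEN flag below advertises.
  */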
86564c3f
YM
3537 pbd->global_data |=
3538 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3539}
f85582f8 3540
a848ade4
DK
3541/**
3542 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3543 *
3544 * @bp: driver handle
3545 * @skb: packet skb
3546 * @parsing_data: data to be updated
3547 * @xmit_type: xmit flags
3548 *
3549 * 57712/578xx related, when skb has encapsulation
3550 */
3551static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3552 u32 *parsing_data, u32 xmit_type)
3553{
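 /* Offsets in the parsing data are expressed in 16-bit words (hence
  * the >> 1) and the TCP header length in 32-bit dwords (hence the
  * / 4), matching the _W and _DW field name suffixes.
  */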
3554 *parsing_data |=
3555 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3556 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3557 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3558
3559 if (xmit_type & XMIT_CSUM_TCP) {
3560 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3561 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3562 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3563
3564 return skb_inner_transport_header(skb) +
3565 inner_tcp_hdrlen(skb) - skb->data;
3566 }
3567
3568 /* We support checksum offload for TCP and UDP only.
3569 * No need to pass the UDP header length - it's a constant.
3570 */
3571 return skb_inner_transport_header(skb) +
3572 sizeof(struct udphdr) - skb->data;
3573}
3574
f2e0899f 3575/**
e8920674 3576 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3577 *
e8920674
DK
3578 * @bp: driver handle
3579 * @skb: packet skb
3580 * @parsing_data: data to be updated
3581 * @xmit_type: xmit flags
f2e0899f 3582 *
91226790 3583 * 57712/578xx related
f2e0899f 3584 */
91226790
DK
3585static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3586 u32 *parsing_data, u32 xmit_type)
f2e0899f 3587{
e39aece7 3588 *parsing_data |=
2de67439 3589 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3590 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3591 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3592
e39aece7
VZ
3593 if (xmit_type & XMIT_CSUM_TCP) {
3594 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3595 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3596 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3597
e39aece7 3598 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3599 }
3600 /* We support checksum offload for TCP and UDP only.
3601 * No need to pass the UDP header length - it's a constant.
3602 */
3603 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3604}
3605
a848ade4 3606/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3607static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3608 struct eth_tx_start_bd *tx_start_bd,
3609 u32 xmit_type)
93ef5c02 3610{
93ef5c02
DK
3611 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3612
a848ade4 3613 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3614 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3615
3616 if (!(xmit_type & XMIT_CSUM_TCP))
3617 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3618}
3619
f2e0899f 3620/**
e8920674 3621 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3622 *
e8920674
DK
3623 * @bp: driver handle
3624 * @skb: packet skb
3625 * @pbd: parse BD to be updated
3626 * @xmit_type: xmit flags
f2e0899f 3627 */
91226790
DK
3628static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3629 struct eth_tx_parse_bd_e1x *pbd,
3630 u32 xmit_type)
f2e0899f 3631{
e39aece7 3632 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3633
3634 /* for now NS flag is not used in Linux */
3635 pbd->global_data =
86564c3f
YM
3636 cpu_to_le16(hlen |
3637 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3638 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3639
3640 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3641 skb_network_header(skb)) >> 1;
f2e0899f 3642
e39aece7
VZ
3643 hlen += pbd->ip_hlen_w;
3644
3645 /* We support checksum offload for TCP and UDP only */
3646 if (xmit_type & XMIT_CSUM_TCP)
3647 hlen += tcp_hdrlen(skb) / 2;
3648 else
3649 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3650
3651 pbd->total_hlen_w = cpu_to_le16(hlen);
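 /* hlen was accumulated in 16-bit words; hand it back to the caller
  * in bytes.
  */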
3652 hlen = hlen*2;
3653
3654 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3655 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3656
3657 } else {
3658 s8 fix = SKB_CS_OFF(skb); /* signed! */
3659
3660 DP(NETIF_MSG_TX_QUEUED,
3661 "hlen %d fix %d csum before fix %x\n",
3662 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3663
3664 /* HW bug: fixup the CSUM */
3665 pbd->tcp_pseudo_csum =
3666 bnx2x_csum_fix(skb_transport_header(skb),
3667 SKB_CS(skb), fix);
3668
3669 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3670 pbd->tcp_pseudo_csum);
3671 }
3672
3673 return hlen;
3674}
f85582f8 3675
a848ade4
DK
3676static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3677 struct eth_tx_parse_bd_e2 *pbd_e2,
3678 struct eth_tx_parse_2nd_bd *pbd2,
3679 u16 *global_data,
3680 u32 xmit_type)
3681{
e287a75c 3682 u16 hlen_w = 0;
a848ade4 3683 u8 outerip_off, outerip_len = 0;
e768fb29 3684
e287a75c
DK
3685 /* from outer IP to transport */
3686 hlen_w = (skb_inner_transport_header(skb) -
3687 skb_network_header(skb)) >> 1;
a848ade4
DK
3688
3689 /* transport len */
e768fb29 3690 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3691
e287a75c 3692 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4 3693
e768fb29
DK
3694 /* outer IP header info */
3695 if (xmit_type & XMIT_CSUM_V4) {
e287a75c 3696 struct iphdr *iph = ip_hdr(skb);
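 /* Back the tot_len and frag_off fields out of the outer IP header
  * checksum (~check undoes the final complement) and re-fold it,
  * which yields the "checksum without length/flags/fragment" value
  * that the BD field expects.
  */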
1b4fc0e2
DK
3697 u32 csum = (__force u32)(~iph->check) -
3698 (__force u32)iph->tot_len -
3699 (__force u32)iph->frag_off;
c957d09f 3700
e42780b6
DK
3701 outerip_len = iph->ihl << 1;
3702
a848ade4 3703 pbd2->fw_ip_csum_wo_len_flags_frag =
c957d09f 3704 bswab16(csum_fold((__force __wsum)csum));
a848ade4
DK
3705 } else {
3706 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3707 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
e42780b6 3708 pbd_e2->data.tunnel_data.flags |=
28311f8e 3709 ETH_TUNNEL_DATA_IPV6_OUTER;
a848ade4
DK
3710 }
3711
3712 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3713
3714 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3715
e42780b6
DK
3716 /* inner IP header info */
3717 if (xmit_type & XMIT_CSUM_ENC_V4) {
e287a75c 3718 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3719
3720 pbd_e2->data.tunnel_data.pseudo_csum =
3721 bswab16(~csum_tcpudp_magic(
3722 inner_ip_hdr(skb)->saddr,
3723 inner_ip_hdr(skb)->daddr,
3724 0, IPPROTO_TCP, 0));
a848ade4
DK
3725 } else {
3726 pbd_e2->data.tunnel_data.pseudo_csum =
3727 bswab16(~csum_ipv6_magic(
3728 &inner_ipv6_hdr(skb)->saddr,
3729 &inner_ipv6_hdr(skb)->daddr,
3730 0, IPPROTO_TCP, 0));
3731 }
3732
3733 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3734
3735 *global_data |=
3736 outerip_off |
a848ade4
DK
3737 (outerip_len <<
3738 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3739 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3740 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
65bc0cfe
DK
3741
3742 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3743 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3744 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3745 }
a848ade4
DK
3746}
3747
e42780b6
DK
3748static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3749 u32 xmit_type)
3750{
3751 struct ipv6hdr *ipv6;
3752
3753 if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3754 return;
3755
3756 if (xmit_type & XMIT_GSO_ENC_V6)
3757 ipv6 = inner_ipv6_hdr(skb);
3758 else /* XMIT_GSO_V6 */
3759 ipv6 = ipv6_hdr(skb);
3760
3761 if (ipv6->nexthdr == NEXTHDR_IPV6)
3762 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3763}
3764
9f6c9258
DK
3765/* called with netif_tx_lock
3766 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3767 * netif_wake_queue()
3768 */
3769netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3770{
3771 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3772
9f6c9258 3773 struct netdev_queue *txq;
6383c0b3 3774 struct bnx2x_fp_txdata *txdata;
9f6c9258 3775 struct sw_tx_bd *tx_buf;
619c5cb6 3776 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3777 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3778 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3779 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3780 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3781 u32 pbd_e2_parsing_data = 0;
9f6c9258 3782 u16 pkt_prod, bd_prod;
65565884 3783 int nbd, txq_index;
9f6c9258
DK
3784 dma_addr_t mapping;
3785 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3786 int i;
3787 u8 hlen = 0;
3788 __le16 pkt_size = 0;
3789 struct ethhdr *eth;
3790 u8 mac_type = UNICAST_ADDRESS;
3791
3792#ifdef BNX2X_STOP_ON_ERROR
3793 if (unlikely(bp->panic))
3794 return NETDEV_TX_BUSY;
3795#endif
3796
6383c0b3
AE
3797 txq_index = skb_get_queue_mapping(skb);
3798 txq = netdev_get_tx_queue(dev, txq_index);
3799
55c11941 3800 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3801
65565884 3802 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3803
3804 /* enable this debug print to view the transmission queue being used
51c1a580 3805 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3806 txq_index, fp_index, txdata_index); */
9f6c9258 3807
16a5fd92 3808 /* enable this debug print to view the transmission details
51c1a580
MS
3809 DP(NETIF_MSG_TX_QUEUED,
3810 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3811 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3812
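 /* Check Tx ring room for the worst case: one BD per fragment plus
  * the start/parsing BDs plus any next-page BDs the packet may cross.
  */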
6383c0b3 3813 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3814 skb_shinfo(skb)->nr_frags +
3815 BDS_PER_TX_PKT +
3816 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3817 /* Handle special storage cases separately */
c96bdc0c
DK
3818 if (txdata->tx_ring_size == 0) {
3819 struct bnx2x_eth_q_stats *q_stats =
3820 bnx2x_fp_qstats(bp, txdata->parent_fp);
3821 q_stats->driver_filtered_tx_pkt++;
3822 dev_kfree_skb(skb);
3823 return NETDEV_TX_OK;
3824 }
2de67439
YM
3825 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3826 netif_tx_stop_queue(txq);
c96bdc0c 3827 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3828
9f6c9258
DK
3829 return NETDEV_TX_BUSY;
3830 }
3831
51c1a580 3832 DP(NETIF_MSG_TX_QUEUED,
04c46736 3833 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3834 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3835 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3836 skb->len);
9f6c9258
DK
3837
3838 eth = (struct ethhdr *)skb->data;
3839
3840 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3841 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3842 if (is_broadcast_ether_addr(eth->h_dest))
3843 mac_type = BROADCAST_ADDRESS;
3844 else
3845 mac_type = MULTICAST_ADDRESS;
3846 }
3847
91226790 3848#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3849 /* First, check if we need to linearize the skb (due to FW
3850 restrictions). No need to check fragmentation if page size > 8K
3851 (there will be no violation to FW restrictions) */
3852 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3853 /* Statistics of linearization */
3854 bp->lin_cnt++;
3855 if (skb_linearize(skb) != 0) {
51c1a580
MS
3856 DP(NETIF_MSG_TX_QUEUED,
3857 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3858 dev_kfree_skb_any(skb);
3859 return NETDEV_TX_OK;
3860 }
3861 }
3862#endif
619c5cb6
VZ
3863 /* Map skb linear data for DMA */
3864 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3865 skb_headlen(skb), DMA_TO_DEVICE);
3866 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3867 DP(NETIF_MSG_TX_QUEUED,
3868 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3869 dev_kfree_skb_any(skb);
3870 return NETDEV_TX_OK;
3871 }
9f6c9258
DK
3872 /*
3873 Please read carefully. First we use one BD which we mark as start,
3874 then we have a parsing info BD (used for TSO or xsum),
3875 and only then we have the rest of the TSO BDs.
3876 (don't forget to mark the last one as last,
3877 and to unmap only AFTER you write to the BD ...)
3878 And above all, all PBD sizes are in words - NOT DWORDS!
3879 */
3880
619c5cb6
VZ
3881 /* get current pkt produced now - advance it just before sending packet
3882 * since mapping of pages may fail and cause packet to be dropped
3883 */
6383c0b3
AE
3884 pkt_prod = txdata->tx_pkt_prod;
3885 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3886
619c5cb6
VZ
3887 /* get a tx_buf and first BD
3888 * tx_start_bd may be changed during SPLIT,
3889 * but first_bd will always stay first
3890 */
6383c0b3
AE
3891 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3892 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3893 first_bd = tx_start_bd;
9f6c9258
DK
3894
3895 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3896
eeed018c
MK
3897 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3898 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3899 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3900 } else if (bp->ptp_tx_skb) {
3901 BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
3902 } else {
3903 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3904 /* schedule check for Tx timestamp */
3905 bp->ptp_tx_skb = skb_get(skb);
3906 bp->ptp_tx_start = jiffies;
3907 schedule_work(&bp->ptp_task);
3908 }
3909 }
3910
91226790
DK
3911 /* header nbd: indirectly zero other flags! */
3912 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3913
3914 /* remember the first BD of the packet */
6383c0b3 3915 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3916 tx_buf->skb = skb;
3917 tx_buf->flags = 0;
3918
3919 DP(NETIF_MSG_TX_QUEUED,
3920 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3921 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3922
df8a39de 3923 if (skb_vlan_tag_present(skb)) {
523224a3 3924 tx_start_bd->vlan_or_ethertype =
df8a39de 3925 cpu_to_le16(skb_vlan_tag_get(skb));
523224a3
DK
3926 tx_start_bd->bd_flags.as_bitfield |=
3927 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3928 } else {
3929 /* when transmitting in a vf, start bd must hold the ethertype
3930 * for fw to enforce it
3931 */
ea36475a 3932#ifndef BNX2X_STOP_ON_ERROR
91226790 3933 if (IS_VF(bp))
ea36475a 3934#endif
dc1ba591
AE
3935 tx_start_bd->vlan_or_ethertype =
3936 cpu_to_le16(ntohs(eth->h_proto));
ea36475a 3937#ifndef BNX2X_STOP_ON_ERROR
91226790 3938 else
dc1ba591
AE
3939 /* used by FW for packet accounting */
3940 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
ea36475a 3941#endif
dc1ba591 3942 }
9f6c9258 3943
91226790
DK
3944 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3945
9f6c9258
DK
3946 /* turn on parsing and get a BD */
3947 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3948
93ef5c02
DK
3949 if (xmit_type & XMIT_CSUM)
3950 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3951
619c5cb6 3952 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3953 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3954 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3955
3956 if (xmit_type & XMIT_CSUM_ENC) {
3957 u16 global_data = 0;
3958
3959 /* Set PBD in enc checksum offload case */
3960 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3961 &pbd_e2_parsing_data,
3962 xmit_type);
3963
3964 /* turn on 2nd parsing and get a BD */
3965 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3966
3967 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3968
3969 memset(pbd2, 0, sizeof(*pbd2));
3970
3971 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3972 (skb_inner_network_header(skb) -
3973 skb->data) >> 1;
3974
3975 if (xmit_type & XMIT_GSO_ENC)
3976 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3977 &global_data,
3978 xmit_type);
3979
3980 pbd2->global_data = cpu_to_le16(global_data);
3981
3982 /* add addition parse BD indication to start BD */
3983 SET_FLAG(tx_start_bd->general_data,
3984 ETH_TX_START_BD_PARSE_NBDS, 1);
3985 /* set encapsulation flag in start BD */
3986 SET_FLAG(tx_start_bd->general_data,
3987 ETH_TX_START_BD_TUNNEL_EXIST, 1);
fe26566d
DK
3988
3989 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3990
a848ade4
DK
3991 nbd++;
3992 } else if (xmit_type & XMIT_CSUM) {
91226790 3993 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3994 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3995 &pbd_e2_parsing_data,
3996 xmit_type);
a848ade4 3997 }
dc1ba591 3998
e42780b6 3999 bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
babe723d
YM
4000 /* Add the macs to the parsing BD if this is a vf or if
4001 * Tx Switching is enabled.
4002 */
91226790
DK
4003 if (IS_VF(bp)) {
4004 /* override GRE parameters in BD */
4005 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4006 &pbd_e2->data.mac_addr.src_mid,
4007 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 4008 eth->h_source);
91226790 4009
babe723d
YM
4010 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
4011 &pbd_e2->data.mac_addr.dst_mid,
4012 &pbd_e2->data.mac_addr.dst_lo,
4013 eth->h_dest);
ea36475a
YM
4014 } else {
4015 if (bp->flags & TX_SWITCHING)
4016 bnx2x_set_fw_mac_addr(
4017 &pbd_e2->data.mac_addr.dst_hi,
4018 &pbd_e2->data.mac_addr.dst_mid,
4019 &pbd_e2->data.mac_addr.dst_lo,
4020 eth->h_dest);
4021#ifdef BNX2X_STOP_ON_ERROR
4022 /* Enforce security is always set in Stop on Error -
4023 * source mac should be present in the parsing BD
4024 */
4025 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4026 &pbd_e2->data.mac_addr.src_mid,
4027 &pbd_e2->data.mac_addr.src_lo,
4028 eth->h_source);
4029#endif
619c5cb6 4030 }
96bed4b9
YM
4031
4032 SET_FLAG(pbd_e2_parsing_data,
4033 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 4034 } else {
96bed4b9 4035 u16 global_data = 0;
6383c0b3 4036 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
4037 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4038 /* Set PBD in checksum offload case */
4039 if (xmit_type & XMIT_CSUM)
4040 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 4041
96bed4b9
YM
4042 SET_FLAG(global_data,
4043 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4044 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
4045 }
4046
f85582f8 4047 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
4048 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4049 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
4050 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4051 pkt_size = tx_start_bd->nbytes;
4052
51c1a580 4053 DP(NETIF_MSG_TX_QUEUED,
91226790 4054 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 4055 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 4056 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
4057 tx_start_bd->bd_flags.as_bitfield,
4058 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
4059
4060 if (xmit_type & XMIT_GSO) {
4061
4062 DP(NETIF_MSG_TX_QUEUED,
4063 "TSO packet len %d hlen %d total len %d tso size %d\n",
4064 skb->len, hlen, skb_headlen(skb),
4065 skb_shinfo(skb)->gso_size);
4066
4067 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4068
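 /* If the linear buffer holds payload beyond the headers, split it
  * so the first BD covers the headers only.
  */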
91226790
DK
4069 if (unlikely(skb_headlen(skb) > hlen)) {
4070 nbd++;
6383c0b3
AE
4071 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4072 &tx_start_bd, hlen,
91226790
DK
4073 bd_prod);
4074 }
619c5cb6 4075 if (!CHIP_IS_E1x(bp))
e42780b6
DK
4076 pbd_e2_parsing_data |=
4077 (skb_shinfo(skb)->gso_size <<
4078 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4079 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f 4080 else
e42780b6 4081 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 4082 }
2297a2da
VZ
4083
4084 /* Set the PBD's parsing_data field if not zero
4085 * (for the chips newer than 57711).
4086 */
4087 if (pbd_e2_parsing_data)
4088 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4089
9f6c9258
DK
4090 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4091
f85582f8 4092 /* Handle fragmented skb */
9f6c9258
DK
4093 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4094 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4095
9e903e08
ED
4096 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4097 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 4098 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 4099 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 4100
51c1a580
MS
4101 DP(NETIF_MSG_TX_QUEUED,
4102 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
4103
4104 /* we need unmap all buffers already mapped
4105 * for this SKB;
4106 * first_bd->nbd need to be properly updated
4107 * before call to bnx2x_free_tx_pkt
4108 */
4109 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 4110 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
4111 TX_BD(txdata->tx_pkt_prod),
4112 &pkts_compl, &bytes_compl);
619c5cb6
VZ
4113 return NETDEV_TX_OK;
4114 }
4115
9f6c9258 4116 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 4117 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 4118 if (total_pkt_bd == NULL)
6383c0b3 4119 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 4120
9f6c9258
DK
4121 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4122 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
4123 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4124 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 4125 nbd++;
9f6c9258
DK
4126
4127 DP(NETIF_MSG_TX_QUEUED,
4128 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4129 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4130 le16_to_cpu(tx_data_bd->nbytes));
4131 }
4132
4133 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4134
619c5cb6
VZ
4135 /* update with actual num BDs */
4136 first_bd->nbd = cpu_to_le16(nbd);
4137
9f6c9258
DK
4138 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4139
4140 /* now send a tx doorbell, counting the next BD
4141 * if the packet contains or ends with it
4142 */
4143 if (TX_BD_POFF(bd_prod) < nbd)
4144 nbd++;
4145
619c5cb6
VZ
4146 /* total_pkt_bytes should be set on the first data BD if
4147 * it's not an LSO packet and there is more than one
4148 * data BD. In this case pkt_size is limited by an MTU value.
4149 * However we prefer to set it for an LSO packet (while we don't
4150 * have to) in order to save some CPU cycles in the non-LSO
4151 * case, where we care much more about them.
4152 */
9f6c9258
DK
4153 if (total_pkt_bd != NULL)
4154 total_pkt_bd->total_pkt_bytes = pkt_size;
4155
523224a3 4156 if (pbd_e1x)
9f6c9258 4157 DP(NETIF_MSG_TX_QUEUED,
51c1a580 4158 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
4159 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4160 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4161 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4162 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
4163 if (pbd_e2)
4164 DP(NETIF_MSG_TX_QUEUED,
4165 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
4166 pbd_e2,
4167 pbd_e2->data.mac_addr.dst_hi,
4168 pbd_e2->data.mac_addr.dst_mid,
4169 pbd_e2->data.mac_addr.dst_lo,
4170 pbd_e2->data.mac_addr.src_hi,
4171 pbd_e2->data.mac_addr.src_mid,
4172 pbd_e2->data.mac_addr.src_lo,
f2e0899f 4173 pbd_e2->parsing_data);
9f6c9258
DK
4174 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4175
2df1a70a
TH
4176 netdev_tx_sent_queue(txq, skb->len);
4177
8373c57d
WB
4178 skb_tx_timestamp(skb);
4179
6383c0b3 4180 txdata->tx_pkt_prod++;
9f6c9258
DK
4181 /*
4182 * Make sure that the BD data is updated before updating the producer
4183 * since FW might read the BD right after the producer is updated.
4184 * This is only applicable for weak-ordered memory model archs such
4185 * as IA-64. The following barrier is also mandatory since the FW
4186 * assumes packets must have BDs.
4187 */
4188 wmb();
4189
6383c0b3 4190 txdata->tx_db.data.prod += nbd;
9f6c9258 4191 barrier();
f85582f8 4192
6383c0b3 4193 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
4194
4195 mmiowb();
4196
6383c0b3 4197 txdata->tx_bd_prod += nbd;
9f6c9258 4198
7df2dc6b 4199 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
4200 netif_tx_stop_queue(txq);
4201
4202 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4203 * ordering of set_bit() in netif_tx_stop_queue() and read of
4204 * fp->bd_tx_cons */
4205 smp_mb();
4206
15192a8c 4207 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 4208 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
4209 netif_tx_wake_queue(txq);
4210 }
6383c0b3 4211 txdata->tx_pkt++;
9f6c9258
DK
4212
4213 return NETDEV_TX_OK;
4214}
f85582f8 4215
230d00eb
YM
4216void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4217{
4218 int mfw_vn = BP_FW_MB_IDX(bp);
4219 u32 tmp;
4220
4221 /* If the shmem shouldn't affect configuration, use an identity map */
4222 if (!IS_MF_BD(bp)) {
4223 int i;
4224
4225 for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4226 c2s_map[i] = i;
4227 *c2s_default = 0;
4228
4229 return;
4230 }
4231
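 /* Each 32-bit shmem word packs four map entries, one byte each:
  * the lower word holds entries 0-3, the upper word entries 4-7.
  */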
4232 tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4233 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4234 c2s_map[0] = tmp & 0xff;
4235 c2s_map[1] = (tmp >> 8) & 0xff;
4236 c2s_map[2] = (tmp >> 16) & 0xff;
4237 c2s_map[3] = (tmp >> 24) & 0xff;
4238
4239 tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4240 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4241 c2s_map[4] = tmp & 0xff;
4242 c2s_map[5] = (tmp >> 8) & 0xff;
4243 c2s_map[6] = (tmp >> 16) & 0xff;
4244 c2s_map[7] = (tmp >> 24) & 0xff;
4245
4246 tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4247 tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4248 *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4249}
4250
6383c0b3
AE
4251/**
4252 * bnx2x_setup_tc - routine to configure net_device for multi tc
4253 *
4254 * @netdev: net device to configure
4255 * @tc: number of traffic classes to enable
4256 *
4257 * callback connected to the ndo_setup_tc function pointer
4258 */
4259int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4260{
6383c0b3 4261 struct bnx2x *bp = netdev_priv(dev);
230d00eb
YM
4262 u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4263 int cos, prio, count, offset;
6383c0b3
AE
4264
4265 /* setup tc must be called under rtnl lock */
4266 ASSERT_RTNL();
4267
16a5fd92 4268 /* no traffic classes requested. Aborting */
6383c0b3
AE
4269 if (!num_tc) {
4270 netdev_reset_tc(dev);
4271 return 0;
4272 }
4273
4274 /* requested to support too many traffic classes */
4275 if (num_tc > bp->max_cos) {
6bf07b8e 4276 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
51c1a580 4277 num_tc, bp->max_cos);
6383c0b3
AE
4278 return -EINVAL;
4279 }
4280
4281 /* declare amount of supported traffic classes */
4282 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 4283 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
4284 return -EINVAL;
4285 }
4286
230d00eb
YM
4287 bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4288
6383c0b3
AE
4289 /* configure priority to traffic class mapping */
4290 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
230d00eb
YM
4291 int outer_prio = c2s_map[prio];
4292
4293 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
51c1a580
MS
4294 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4295 "mapping priority %d to tc %d\n",
230d00eb 4296 outer_prio, bp->prio_to_cos[outer_prio]);
6383c0b3
AE
4297 }
4298
16a5fd92 4299 /* Use this configuration to differentiate tc0 from other COSes
6383c0b3
AE
4300 This can be used for ets or pfc, and save the effort of setting
4301 up a multi-class queue disc or negotiating DCBX with a switch
4302 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 4303 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
4304 for (prio = 1; prio < 16; prio++) {
4305 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 4306 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
4307 } */
4308
4309 /* configure traffic class to transmission queue mapping */
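 /* Each traffic class is served by its own contiguous block of Tx
  * queues, starting at cos * BNX2X_NUM_NON_CNIC_QUEUES(bp).
  */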
4310 for (cos = 0; cos < bp->max_cos; cos++) {
4311 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 4312 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 4313 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
4314 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4315 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
4316 cos, offset, count);
4317 }
4318
4319 return 0;
4320}
4321
9f6c9258
DK
4322/* called with rtnl_lock */
4323int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4324{
4325 struct sockaddr *addr = p;
4326 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4327 int rc = 0;
9f6c9258 4328
2e98ffc2 4329 if (!is_valid_ether_addr(addr->sa_data)) {
51c1a580 4330 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4331 return -EINVAL;
51c1a580 4332 }
614c76df 4333
2e98ffc2
DK
4334 if (IS_MF_STORAGE_ONLY(bp)) {
4335 BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
9f6c9258 4336 return -EINVAL;
51c1a580 4337 }
9f6c9258 4338
619c5cb6
VZ
4339 if (netif_running(dev)) {
4340 rc = bnx2x_set_eth_mac(bp, false);
4341 if (rc)
4342 return rc;
4343 }
4344
9f6c9258 4345 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4346
523224a3 4347 if (netif_running(dev))
619c5cb6 4348 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4349
230d00eb
YM
4350 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4351 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4352
619c5cb6 4353 return rc;
9f6c9258
DK
4354}
4355
b3b83c3f
DK
4356static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4357{
4358 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4359 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4360 u8 cos;
b3b83c3f
DK
4361
4362 /* Common */
55c11941 4363
b3b83c3f
DK
4364 if (IS_FCOE_IDX(fp_index)) {
4365 memset(sb, 0, sizeof(union host_hc_status_block));
4366 fp->status_blk_mapping = 0;
b3b83c3f 4367 } else {
b3b83c3f 4368 /* status blocks */
619c5cb6 4369 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4370 BNX2X_PCI_FREE(sb->e2_sb,
4371 bnx2x_fp(bp, fp_index,
4372 status_blk_mapping),
4373 sizeof(struct host_hc_status_block_e2));
4374 else
4375 BNX2X_PCI_FREE(sb->e1x_sb,
4376 bnx2x_fp(bp, fp_index,
4377 status_blk_mapping),
4378 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4379 }
55c11941 4380
b3b83c3f
DK
4381 /* Rx */
4382 if (!skip_rx_queue(bp, fp_index)) {
4383 bnx2x_free_rx_bds(fp);
4384
4385 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4386 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4387 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4388 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4389 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4390
4391 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4392 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4393 sizeof(struct eth_fast_path_rx_cqe) *
4394 NUM_RCQ_BD);
4395
4396 /* SGE ring */
4397 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4398 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4399 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4400 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4401 }
4402
4403 /* Tx */
4404 if (!skip_tx_queue(bp, fp_index)) {
4405 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4406 for_each_cos_in_tx_queue(fp, cos) {
65565884 4407 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4408
51c1a580 4409 DP(NETIF_MSG_IFDOWN,
94f05b0f 4410 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4411 fp_index, cos, txdata->cid);
4412
4413 BNX2X_FREE(txdata->tx_buf_ring);
4414 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4415 txdata->tx_desc_mapping,
4416 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4417 }
b3b83c3f
DK
4418 }
4419 /* end of fastpath */
4420}
4421
a8f47eb7 4422static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4423{
4424 int i;
4425 for_each_cnic_queue(bp, i)
4426 bnx2x_free_fp_mem_at(bp, i);
4427}
4428
b3b83c3f
DK
4429void bnx2x_free_fp_mem(struct bnx2x *bp)
4430{
4431 int i;
55c11941 4432 for_each_eth_queue(bp, i)
b3b83c3f
DK
4433 bnx2x_free_fp_mem_at(bp, i);
4434}
4435
1191cb83 4436static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4437{
4438 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4439 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4440 bnx2x_fp(bp, index, sb_index_values) =
4441 (__le16 *)status_blk.e2_sb->sb.index_values;
4442 bnx2x_fp(bp, index, sb_running_index) =
4443 (__le16 *)status_blk.e2_sb->sb.running_index;
4444 } else {
4445 bnx2x_fp(bp, index, sb_index_values) =
4446 (__le16 *)status_blk.e1x_sb->sb.index_values;
4447 bnx2x_fp(bp, index, sb_running_index) =
4448 (__le16 *)status_blk.e1x_sb->sb.running_index;
4449 }
4450}
4451
1191cb83
ED
4452/* Returns the number of actually allocated BDs */
4453static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4454 int rx_ring_size)
4455{
4456 struct bnx2x *bp = fp->bp;
4457 u16 ring_prod, cqe_ring_prod;
4458 int i, failure_cnt = 0;
4459
4460 fp->rx_comp_cons = 0;
4461 cqe_ring_prod = ring_prod = 0;
4462
4463 /* This routine is called only during fp init, so
4464 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4465 */
4466 for (i = 0; i < rx_ring_size; i++) {
996dedba 4467 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
1191cb83
ED
4468 failure_cnt++;
4469 continue;
4470 }
4471 ring_prod = NEXT_RX_IDX(ring_prod);
4472 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4473 WARN_ON(ring_prod <= (i - failure_cnt));
4474 }
4475
4476 if (failure_cnt)
4477 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4478 i - failure_cnt, fp->index);
4479
4480 fp->rx_bd_prod = ring_prod;
4481 /* Limit the CQE producer by the CQE ring size */
4482 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4483 cqe_ring_prod);
4484 fp->rx_pkt = fp->rx_calls = 0;
4485
15192a8c 4486 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4487
4488 return i - failure_cnt;
4489}
4490
4491static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4492{
4493 int i;
4494
4495 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4496 struct eth_rx_cqe_next_page *nextpg;
4497
4498 nextpg = (struct eth_rx_cqe_next_page *)
4499 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4500 nextpg->addr_hi =
4501 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4502 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4503 nextpg->addr_lo =
4504 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4505 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4506 }
4507}
4508
b3b83c3f
DK
4509static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4510{
4511 union host_hc_status_block *sb;
4512 struct bnx2x_fastpath *fp = &bp->fp[index];
4513 int ring_size = 0;
6383c0b3 4514 u8 cos;
c2188952 4515 int rx_ring_size = 0;
b3b83c3f 4516
2e98ffc2 4517 if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
614c76df
DK
4518 rx_ring_size = MIN_RX_SIZE_NONTPA;
4519 bp->rx_ring_size = rx_ring_size;
55c11941 4520 } else if (!bp->rx_ring_size) {
c2188952
VZ
4521 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4522
065f8b92
YM
4523 if (CHIP_IS_E3(bp)) {
4524 u32 cfg = SHMEM_RD(bp,
4525 dev_info.port_hw_config[BP_PORT(bp)].
4526 default_cfg);
4527
4528 /* Decrease ring size for 1G functions */
4529 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4530 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4531 rx_ring_size /= 10;
4532 }
d760fc37 4533
c2188952
VZ
4534 /* allocate at least number of buffers required by FW */
4535 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4536 MIN_RX_SIZE_TPA, rx_ring_size);
4537
4538 bp->rx_ring_size = rx_ring_size;
614c76df 4539 } else /* if rx_ring_size specified - use it */
c2188952 4540 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4541
04c46736
YM
4542 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4543
b3b83c3f
DK
4544 /* Common */
4545 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4546
b3b83c3f 4547 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4548 /* status blocks */
cd2b0389
JP
4549 if (!CHIP_IS_E1x(bp)) {
4550 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4551 sizeof(struct host_hc_status_block_e2));
4552 if (!sb->e2_sb)
4553 goto alloc_mem_err;
4554 } else {
4555 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4556 sizeof(struct host_hc_status_block_e1x));
4557 if (!sb->e1x_sb)
4558 goto alloc_mem_err;
4559 }
b3b83c3f 4560 }
8eef2af1
DK
4561
4562 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4563 * set shortcuts for it.
4564 */
4565 if (!IS_FCOE_IDX(index))
4566 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4567
4568 /* Tx */
4569 if (!skip_tx_queue(bp, index)) {
4570 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4571 for_each_cos_in_tx_queue(fp, cos) {
65565884 4572 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4573
51c1a580
MS
4574 DP(NETIF_MSG_IFUP,
4575 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4576 index, cos);
4577
cd2b0389
JP
4578 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4579 sizeof(struct sw_tx_bd),
4580 GFP_KERNEL);
4581 if (!txdata->tx_buf_ring)
4582 goto alloc_mem_err;
4583 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4584 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4585 if (!txdata->tx_desc_ring)
4586 goto alloc_mem_err;
6383c0b3 4587 }
b3b83c3f
DK
4588 }
4589
4590 /* Rx */
4591 if (!skip_rx_queue(bp, index)) {
4592 /* fastpath rx rings: rx_buf rx_desc rx_comp */
cd2b0389
JP
4593 bnx2x_fp(bp, index, rx_buf_ring) =
4594 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4595 if (!bnx2x_fp(bp, index, rx_buf_ring))
4596 goto alloc_mem_err;
4597 bnx2x_fp(bp, index, rx_desc_ring) =
4598 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4599 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4600 if (!bnx2x_fp(bp, index, rx_desc_ring))
4601 goto alloc_mem_err;
b3b83c3f 4602
75b29459 4603 /* Seed all CQEs by 1s */
cd2b0389
JP
4604 bnx2x_fp(bp, index, rx_comp_ring) =
4605 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4606 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4607 if (!bnx2x_fp(bp, index, rx_comp_ring))
4608 goto alloc_mem_err;
b3b83c3f
DK
4609
4610 /* SGE ring */
cd2b0389
JP
4611 bnx2x_fp(bp, index, rx_page_ring) =
4612 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4613 GFP_KERNEL);
4614 if (!bnx2x_fp(bp, index, rx_page_ring))
4615 goto alloc_mem_err;
4616 bnx2x_fp(bp, index, rx_sge_ring) =
4617 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4618 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4619 if (!bnx2x_fp(bp, index, rx_sge_ring))
4620 goto alloc_mem_err;
b3b83c3f
DK
4621 /* RX BD ring */
4622 bnx2x_set_next_page_rx_bd(fp);
4623
4624 /* CQ ring */
4625 bnx2x_set_next_page_rx_cq(fp);
4626
4627 /* BDs */
4628 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4629 if (ring_size < rx_ring_size)
4630 goto alloc_mem_err;
4631 }
4632
4633 return 0;
4634
4635/* handles low memory cases */
4636alloc_mem_err:
4637 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4638 index, ring_size);
4639 /* FW will drop all packets if the queue is not big enough;
4640 * in that case we disable the queue.
6383c0b3 4641 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f 4642 */
7e6b4d44 4643 if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
eb722d7a 4644 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4645 /* release memory allocated for this queue */
4646 bnx2x_free_fp_mem_at(bp, index);
4647 return -ENOMEM;
4648 }
4649 return 0;
4650}
4651
a8f47eb7 4652static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4653{
4654 if (!NO_FCOE(bp))
4655 /* FCoE */
4656 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4657 /* we will fail load process instead of mark
4658 * NO_FCOE_FLAG
4659 */
4660 return -ENOMEM;
4661
4662 return 0;
4663}
4664
a8f47eb7 4665static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
b3b83c3f
DK
4666{
4667 int i;
4668
55c11941
MS
4669 /* 1. Allocate FP for leading - fatal if error
4670 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4671 */
4672
4673 /* leading */
4674 if (bnx2x_alloc_fp_mem_at(bp, 0))
4675 return -ENOMEM;
6383c0b3 4676
b3b83c3f
DK
4677 /* RSS */
4678 for_each_nondefault_eth_queue(bp, i)
4679 if (bnx2x_alloc_fp_mem_at(bp, i))
4680 break;
4681
4682 /* handle memory failures */
4683 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4684 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4685
4686 WARN_ON(delta < 0);
4864a16a 4687 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4688 if (CNIC_SUPPORT(bp))
4689 /* move non eth FPs next to last eth FP
4690 * must be done in that order
4691 * FCOE_IDX < FWD_IDX < OOO_IDX
4692 */
b3b83c3f 4693
55c11941
MS
4694 /* move the FCoE fp even if NO_FCOE_FLAG is on */
4695 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4696 bp->num_ethernet_queues -= delta;
4697 bp->num_queues = bp->num_ethernet_queues +
4698 bp->num_cnic_queues;
b3b83c3f
DK
4699 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4700 bp->num_queues + delta, bp->num_queues);
4701 }
4702
4703 return 0;
4704}
d6214d7a 4705
523224a3
DK
4706void bnx2x_free_mem_bp(struct bnx2x *bp)
4707{
c3146eb6
DK
4708 int i;
4709
4710 for (i = 0; i < bp->fp_array_size; i++)
4711 kfree(bp->fp[i].tpa_info);
523224a3 4712 kfree(bp->fp);
15192a8c
BW
4713 kfree(bp->sp_objs);
4714 kfree(bp->fp_stats);
65565884 4715 kfree(bp->bnx2x_txq);
523224a3
DK
4716 kfree(bp->msix_table);
4717 kfree(bp->ilt);
4718}
4719
0329aba1 4720int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4721{
4722 struct bnx2x_fastpath *fp;
4723 struct msix_entry *tbl;
4724 struct bnx2x_ilt *ilt;
6383c0b3 4725 int msix_table_size = 0;
55c11941 4726 int fp_array_size, txq_array_size;
15192a8c 4727 int i;
6383c0b3
AE
4728
4729 /*
4730 * The biggest MSI-X table we might need is as a maximum number of fast
2de67439 4731 * path IGU SBs plus default SB (for PF only).
6383c0b3 4732 */
1ab4434c
AE
4733 msix_table_size = bp->igu_sb_cnt;
4734 if (IS_PF(bp))
4735 msix_table_size++;
4736 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4737
6383c0b3 4738 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4739 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4740 bp->fp_array_size = fp_array_size;
4741 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4742
c3146eb6 4743 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4744 if (!fp)
4745 goto alloc_err;
c3146eb6 4746 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4747 fp[i].tpa_info =
4748 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4749 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4750 if (!(fp[i].tpa_info))
4751 goto alloc_err;
4752 }
4753
523224a3
DK
4754 bp->fp = fp;
4755
15192a8c 4756 /* allocate sp objs */
c3146eb6 4757 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4758 GFP_KERNEL);
4759 if (!bp->sp_objs)
4760 goto alloc_err;
4761
4762 /* allocate fp_stats */
c3146eb6 4763 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4764 GFP_KERNEL);
4765 if (!bp->fp_stats)
4766 goto alloc_err;
4767
65565884 4768 /* Allocate memory for the transmission queues array */
55c11941
MS
4769 txq_array_size =
4770 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4771 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4772
4773 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4774 GFP_KERNEL);
65565884
MS
4775 if (!bp->bnx2x_txq)
4776 goto alloc_err;
4777
523224a3 4778 /* msix table */
01e23742 4779 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4780 if (!tbl)
4781 goto alloc_err;
4782 bp->msix_table = tbl;
4783
4784 /* ilt */
4785 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4786 if (!ilt)
4787 goto alloc_err;
4788 bp->ilt = ilt;
4789
4790 return 0;
4791alloc_err:
4792 bnx2x_free_mem_bp(bp);
4793 return -ENOMEM;
523224a3
DK
4794}
4795
a9fccec7 4796int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4797{
4798 struct bnx2x *bp = netdev_priv(dev);
4799
4800 if (unlikely(!netif_running(dev)))
4801 return 0;
4802
5d07d868 4803 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4804 return bnx2x_nic_load(bp, LOAD_NORMAL);
4805}
4806
1ac9e428
YR
4807int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4808{
4809 u32 sel_phy_idx = 0;
4810 if (bp->link_params.num_phys <= 1)
4811 return INT_PHY;
4812
4813 if (bp->link_vars.link_up) {
4814 sel_phy_idx = EXT_PHY1;
4815 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4816 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4817 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4818 sel_phy_idx = EXT_PHY2;
4819 } else {
4820
4821 switch (bnx2x_phy_selection(&bp->link_params)) {
4822 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4823 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4824 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4825 sel_phy_idx = EXT_PHY1;
4826 break;
4827 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4828 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4829 sel_phy_idx = EXT_PHY2;
4830 break;
4831 }
4832 }
4833
4834 return sel_phy_idx;
1ac9e428
YR
4835}
4836int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4837{
4838 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4839 /*
2de67439 4840 * The selected activated PHY is always after swapping (in case PHY
1ac9e428
YR
4841 * swapping is enabled). So when swapping is enabled, we need to reverse
4842 * the configuration
4843 */
4844
4845 if (bp->link_params.multi_phy_config &
4846 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4847 if (sel_phy_idx == EXT_PHY1)
4848 sel_phy_idx = EXT_PHY2;
4849 else if (sel_phy_idx == EXT_PHY2)
4850 sel_phy_idx = EXT_PHY1;
4851 }
4852 return LINK_CONFIG_IDX(sel_phy_idx);
4853}
4854
55c11941 4855#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4856int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4857{
4858 struct bnx2x *bp = netdev_priv(dev);
4859 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4860
4861 switch (type) {
4862 case NETDEV_FCOE_WWNN:
4863 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4864 cp->fcoe_wwn_node_name_lo);
4865 break;
4866 case NETDEV_FCOE_WWPN:
4867 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4868 cp->fcoe_wwn_port_name_lo);
4869 break;
4870 default:
51c1a580 4871 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4872 return -EINVAL;
4873 }
4874
4875 return 0;
4876}
4877#endif
4878
9f6c9258
DK
4879/* called with rtnl_lock */
4880int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4881{
4882 struct bnx2x *bp = netdev_priv(dev);
9f6c9258 4883
0650c0b8
YM
4884 if (pci_num_vf(bp->pdev)) {
4885 DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4886 return -EPERM;
4887 }
4888
9f6c9258 4889 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4890 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4891 return -EAGAIN;
4892 }
4893
4894 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4895 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4896 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4897 return -EINVAL;
51c1a580 4898 }
9f6c9258
DK
4899
4900 /* This does not race with packet allocation
4901 * because the actual alloc size is
4902 * only updated as part of load
4903 */
4904 dev->mtu = new_mtu;
4905
230d00eb
YM
4906 if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4907 SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4908
66371c44
MM
4909 return bnx2x_reload_if_running(dev);
4910}
4911
c8f44aff 4912netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4913 netdev_features_t features)
66371c44
MM
4914{
4915 struct bnx2x *bp = netdev_priv(dev);
4916
909d9faa
YM
4917 if (pci_num_vf(bp->pdev)) {
4918 netdev_features_t changed = dev->features ^ features;
4919
4920 /* Revert the requested changes in features if they
4921 * would require internal reload of PF in bnx2x_set_features().
4922 */
4923 if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4924 features &= ~NETIF_F_RXCSUM;
4925 features |= dev->features & NETIF_F_RXCSUM;
4926 }
4927
4928 if (changed & NETIF_F_LOOPBACK) {
4929 features &= ~NETIF_F_LOOPBACK;
4930 features |= dev->features & NETIF_F_LOOPBACK;
4931 }
4932 }
4933
66371c44 4934 /* TPA requires Rx CSUM offloading */
aebf6244 4935 if (!(features & NETIF_F_RXCSUM)) {
66371c44 4936 features &= ~NETIF_F_LRO;
621b4d66
DK
4937 features &= ~NETIF_F_GRO;
4938 }
66371c44
MM
4939
4940 return features;
4941}
4942
c8f44aff 4943int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4944{
4945 struct bnx2x *bp = netdev_priv(dev);
f8dcb5e3 4946 netdev_features_t changes = features ^ dev->features;
538dd2e3 4947 bool bnx2x_reload = false;
f8dcb5e3 4948 int rc;
621b4d66 4949
909d9faa
YM
4950 /* VFs or non SRIOV PFs should be able to change loopback feature */
4951 if (!pci_num_vf(bp->pdev)) {
4952 if (features & NETIF_F_LOOPBACK) {
4953 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4954 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4955 bnx2x_reload = true;
4956 }
4957 } else {
4958 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4959 bp->link_params.loopback_mode = LOOPBACK_NONE;
4960 bnx2x_reload = true;
4961 }
538dd2e3
MB
4962 }
4963 }
4964
16a5fd92 4965 /* if GRO is changed while LRO is enabled, don't force a reload */
f8dcb5e3
MS
4966 if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
4967 changes &= ~NETIF_F_GRO;
8802f579 4968
aebf6244 4969 /* if GRO is changed while HW TPA is off, don't force a reload */
f8dcb5e3
MS
4970 if ((changes & NETIF_F_GRO) && bp->disable_tpa)
4971 changes &= ~NETIF_F_GRO;
aebf6244 4972
8802f579 4973 if (changes)
538dd2e3 4974 bnx2x_reload = true;
8802f579 4975
538dd2e3 4976 if (bnx2x_reload) {
f8dcb5e3
MS
4977 if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4978 dev->features = features;
4979 rc = bnx2x_reload_if_running(dev);
4980 return rc ? rc : 1;
4981 }
66371c44 4982 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4983 }
4984
66371c44 4985 return 0;
9f6c9258
DK
4986}
4987
4988void bnx2x_tx_timeout(struct net_device *dev)
4989{
4990 struct bnx2x *bp = netdev_priv(dev);
4991
4992#ifdef BNX2X_STOP_ON_ERROR
4993 if (!bp->panic)
4994 bnx2x_panic();
4995#endif
7be08a72 4996
9f6c9258 4997 /* This allows the netif to be shutdown gracefully before resetting */
230bb0f3 4998 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
9f6c9258
DK
4999}
5000
9f6c9258
DK
5001int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
5002{
5003 struct net_device *dev = pci_get_drvdata(pdev);
5004 struct bnx2x *bp;
5005
5006 if (!dev) {
5007 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5008 return -ENODEV;
5009 }
5010 bp = netdev_priv(dev);
5011
5012 rtnl_lock();
5013
5014 pci_save_state(pdev);
5015
5016 if (!netif_running(dev)) {
5017 rtnl_unlock();
5018 return 0;
5019 }
5020
5021 netif_device_detach(dev);
5022
5d07d868 5023 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
5024
5025 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
5026
5027 rtnl_unlock();
5028
5029 return 0;
5030}
5031
5032int bnx2x_resume(struct pci_dev *pdev)
5033{
5034 struct net_device *dev = pci_get_drvdata(pdev);
5035 struct bnx2x *bp;
5036 int rc;
5037
5038 if (!dev) {
5039 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5040 return -ENODEV;
5041 }
5042 bp = netdev_priv(dev);
5043
5044 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 5045 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
5046 return -EAGAIN;
5047 }
5048
5049 rtnl_lock();
5050
5051 pci_restore_state(pdev);
5052
5053 if (!netif_running(dev)) {
5054 rtnl_unlock();
5055 return 0;
5056 }
5057
5058 bnx2x_set_power_state(bp, PCI_D0);
5059 netif_device_attach(dev);
5060
5061 rc = bnx2x_nic_load(bp, LOAD_OPEN);
5062
5063 rtnl_unlock();
5064
5065 return rc;
5066}
619c5cb6 5067
619c5cb6
VZ
5068void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5069 u32 cid)
5070{
b9871bcf
AE
5071 if (!cxt) {
5072 BNX2X_ERR("bad context pointer %p\n", cxt);
5073 return;
5074 }
5075
619c5cb6
VZ
5076 /* ustorm cxt validation */
5077 cxt->ustorm_ag_context.cdu_usage =
5078 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5079 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5080 /* xcontext validation */
5081 cxt->xstorm_ag_context.cdu_reserved =
5082 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5083 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5084}
5085
1191cb83
ED
5086static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5087 u8 fw_sb_id, u8 sb_index,
5088 u8 ticks)
619c5cb6 5089{
619c5cb6
VZ
5090 u32 addr = BAR_CSTRORM_INTMEM +
5091 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5092 REG_WR8(bp, addr, ticks);
51c1a580
MS
5093 DP(NETIF_MSG_IFUP,
5094 "port %x fw_sb_id %d sb_index %d ticks %d\n",
5095 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
5096}
5097
1191cb83
ED
5098static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5099 u16 fw_sb_id, u8 sb_index,
5100 u8 disable)
619c5cb6
VZ
5101{
5102 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5103 u32 addr = BAR_CSTRORM_INTMEM +
5104 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
0c14e5ce 5105 u8 flags = REG_RD8(bp, addr);
619c5cb6
VZ
5106 /* clear and set */
5107 flags &= ~HC_INDEX_DATA_HC_ENABLED;
5108 flags |= enable_flag;
0c14e5ce 5109 REG_WR8(bp, addr, flags);
51c1a580
MS
5110 DP(NETIF_MSG_IFUP,
5111 "port %x fw_sb_id %d sb_index %d disable %d\n",
5112 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
5113}
5114
5115void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5116 u8 sb_index, u8 disable, u16 usec)
5117{
5118 int port = BP_PORT(bp);
5119 u8 ticks = usec / BNX2X_BTR;
5120
5121 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5122
5123 disable = disable ? 1 : (usec ? 0 : 1);
5124 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5125}
230bb0f3
YM
5126
5127void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5128 u32 verbose)
5129{
4e857c58 5130 smp_mb__before_atomic();
230bb0f3 5131 set_bit(flag, &bp->sp_rtnl_state);
4e857c58 5132 smp_mb__after_atomic();
230bb0f3
YM
5133 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5134 flag);
5135 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5136}
5137EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);