drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();

	/* Reduce memory usage in kdump environment by using only one queue */
	if (reset_devices)
		nq = 1;

	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
	return nq;
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp: driver handle
 * @from: source FP index
 * @to: destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi is kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp: driver handle
 * @buf: character buffer to fill with the fw name
 * @buf_len: length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp: driver handle
 * @delta: number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 * nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb: packet skb
 * @parsing_flags: parsing flags from the START CQE
 * @len_on_bd: total length of the first packet for the
 *             aggregation.
 * @pkt_len: length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfp_mask & __GFP_WAIT))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	if (new_data)
		bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif
	if (budget <= 0)
		return rx_pkt;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* A rmb() is required to ensure that the CQE is not read
		 * before it is written by the adapter DMA. PCI ordering
		 * rules will make sure the other fields are written before
		 * the marker at the end of struct eth_fast_path_rx_cqe
		 * but without rmb() a weakly ordered processor can process
		 * stale data. Without the barrier TPA state-machine might
		 * enter inconsistent state and kernel stack might be
		 * provided with incorrect packet description - these lead
		 * to various kernel crashes.
		 */
		rmb();

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
						rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a none-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		skb_mark_napi_id(skb, &fp->napi);

		if (bnx2x_fp_ll_polling(fp))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp: driver handle
 * @data: link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp: driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp: driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data =
					bnx2x_frag_alloc(fp, GFP_KERNEL);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
						       GFP_KERNEL) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}

static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp: driver handle
 * @nvecs: number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}

int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc;

	/* VFs don't have a default status block */
	if (IS_PF(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
			       bp->msix_table[0].entry);
		msix_vec++;
	}

	/* Cnic requires an msix vector for itself */
	if (CNIC_SUPPORT(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
			       msix_vec, bp->msix_table[msix_vec].entry);
		msix_vec++;
	}

	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
	   msix_vec);

	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc == -ENOSPC) {
		/* Get by with single vector */
		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
		if (rc < 0) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

		BNX2X_DEV_INFO("set number of queues to 1\n");
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
		goto no_msix;
	} else if (rc < msix_vec) {
		/* how many fewer vectors will we have? */
		int diff = msix_vec - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	/* no default status block for vf */
	if (IS_PF(bp)) {
		rc = request_irq(bp->msix_table[offset++].vector,
				 bnx2x_msix_sp_int, 0,
				 bp->dev->name, bp->dev);
		if (rc) {
			BNX2X_ERR("request sp irq failed\n");
			return -EBUSY;
		}
	}

	if (CNIC_SUPPORT(bp))
		offset++;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
			      bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	if (IS_PF(bp)) {
		offset = 1 + CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
			    bp->msix_table[0].vector,
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	} else {
		offset = CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	}
	return 0;
}

int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		BNX2X_DEV_INFO("MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	unsigned int irq;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;
	else
		flags = IRQF_SHARED;

	if (bp->flags & USING_MSIX_FLAG)
		irq = bp->msix_table[0].vector;
	else
		irq = bp->pdev->irq;

	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}

static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
		if (bp->flags & USING_MSIX_FLAG) {
			bp->dev->irq = bp->msix_table[0].vector;
			netdev_info(bp->dev, "using MSIX IRQ %d\n",
				    bp->dev->irq);
		}
	}

	return 0;
}

static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		bnx2x_fp_init_lock(&bp->fp[i]);
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_fp_init_lock(&bp->fp[i]);
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
			usleep_range(1000, 2000);
	}
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
9f6c9258 1862 napi_disable(&bnx2x_fp(bp, i, napi));
9a2620c8
YM
1863 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1864 usleep_range(1000, 2000);
8f20aa57 1865 }
9f6c9258
DK
1866}
1867
1868void bnx2x_netif_start(struct bnx2x *bp)
1869{
4b7ed897
DK
1870 if (netif_running(bp->dev)) {
1871 bnx2x_napi_enable(bp);
55c11941
MS
1872 if (CNIC_LOADED(bp))
1873 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1874 bnx2x_int_enable(bp);
1875 if (bp->state == BNX2X_STATE_OPEN)
1876 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1877 }
1878}
1879
1880void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1881{
1882 bnx2x_int_disable_sync(bp, disable_hw);
1883 bnx2x_napi_disable(bp);
55c11941
MS
1884 if (CNIC_LOADED(bp))
1885 bnx2x_napi_disable_cnic(bp);
9f6c9258 1886}
9f6c9258 1887
f663dd9a 1888u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
99932d4f 1889 void *accel_priv, select_queue_fallback_t fallback)
8307fa3e 1890{
8307fa3e 1891 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1892
55c11941 1893 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1894 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1895 u16 ether_type = ntohs(hdr->h_proto);
1896
1897 /* Skip VLAN tag if present */
1898 if (ether_type == ETH_P_8021Q) {
1899 struct vlan_ethhdr *vhdr =
1900 (struct vlan_ethhdr *)skb->data;
1901
1902 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1903 }
1904
1905 /* If ethertype is FCoE or FIP - use FCoE ring */
1906 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1907 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1908 }
55c11941 1909
cdb9d6ae 1910 /* select a non-FCoE queue */
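	/* fallback() picks a Tx queue the usual way (hash/XPS); the modulo
	 * keeps the result within the ETH queue range so the dedicated FCoE
	 * ring is never chosen for regular traffic
	 */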
99932d4f 1911 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
8307fa3e
VZ
1912}
1913
d6214d7a
DK
1914void bnx2x_set_num_queues(struct bnx2x *bp)
1915{
96305234 1916 /* RSS queues */
55c11941 1917 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1918
a3348722
BW
1919 /* override in STORAGE SD modes */
1920 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
55c11941
MS
1921 bp->num_ethernet_queues = 1;
1922
ec6ba945 1923 /* Add special queues */
55c11941
MS
1924 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1925 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1926
1927 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1928}
1929
cdb9d6ae
VZ
1930/**
1931 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1932 *
1933 * @bp: Driver handle
1934 *
 1935  * We currently support at most 16 Tx queues for each CoS, thus we will
1936 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1937 * bp->max_cos.
1938 *
1939 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1940 * index after all ETH L2 indices.
1941 *
1942 * If the actual number of Tx queues (for each CoS) is less than 16 then there
 1943  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
16a5fd92 1944 * 16..31,...) with indices that are not coupled with any real Tx queue.
cdb9d6ae
VZ
1945 *
1946 * The proper configuration of skb->queue_mapping is handled by
1947 * bnx2x_select_queue() and __skb_tx_hash().
1948 *
1949 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1950 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
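 *
 * Illustrative example (hypothetical numbers): with 8 ETH queues and
 * bp->max_cos == 3 the netdev gets 8 * 3 = 24 real Tx queues and 8 real
 * Rx queues, plus one more of each when the FCoE L2 queue is included.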
1951 */
55c11941 1952static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1953{
6383c0b3 1954 int rc, tx, rx;
ec6ba945 1955
65565884 1956 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1957 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1958
6383c0b3 1959/* account for fcoe queue */
55c11941
MS
1960 if (include_cnic && !NO_FCOE(bp)) {
1961 rx++;
1962 tx++;
6383c0b3 1963 }
6383c0b3
AE
1964
1965 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1966 if (rc) {
1967 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1968 return rc;
1969 }
1970 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1971 if (rc) {
1972 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1973 return rc;
1974 }
1975
51c1a580 1976 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1977 tx, rx);
1978
ec6ba945
VZ
1979 return rc;
1980}
1981
1191cb83 1982static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1983{
1984 int i;
1985
1986 for_each_queue(bp, i) {
1987 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1988 u32 mtu;
a8c94b91
VZ
1989
1990 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1991 if (IS_FCOE_IDX(i))
1992 /*
1993 * Although there are no IP frames expected to arrive to
1994 * this ring we still want to add an
1995 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1996 * overrun attack.
1997 */
e52fcb24 1998 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1999 else
e52fcb24
ED
2000 mtu = bp->dev->mtu;
2001 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2002 IP_HEADER_ALIGNMENT_PADDING +
2003 ETH_OVREHEAD +
2004 mtu +
2005 BNX2X_FW_RX_ALIGN_END;
16a5fd92 2006 	/* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
2007 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2008 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2009 else
2010 fp->rx_frag_size = 0;
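		/* a zero rx_frag_size means the buffer no longer fits in a
		 * single page together with NET_SKB_PAD; the allocation path
		 * (bnx2x_frag_alloc()) then falls back to kmalloc() instead
		 * of page-fragment allocation
		 */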
a8c94b91
VZ
2011 }
2012}
2013
60cad4e6 2014static int bnx2x_init_rss(struct bnx2x *bp)
619c5cb6
VZ
2015{
2016 int i;
619c5cb6
VZ
2017 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2018
16a5fd92 2019 /* Prepare the initial contents for the indirection table if RSS is
619c5cb6
VZ
2020 * enabled
2021 */
5d317c6a
MS
2022 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2023 bp->rss_conf_obj.ind_table[i] =
96305234
DK
2024 bp->fp->cl_id +
2025 ethtool_rxfh_indir_default(i, num_eth_queues);
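	/* ethtool_rxfh_indir_default(i, n) spreads the entries round-robin
	 * (i % n), so the RSS hash buckets map evenly onto the ETH queues,
	 * offset from the first client ID
	 */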
619c5cb6
VZ
2026
2027 /*
2028 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 2029 	 * per-port, so if explicit configuration is needed, do it only
2030 * for a PMF.
2031 *
2032 * For 57712 and newer on the other hand it's a per-function
2033 * configuration.
2034 */
5d317c6a 2035 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
2036}
2037
60cad4e6
AE
2038int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2039 bool config_hash, bool enable)
619c5cb6 2040{
3b603066 2041 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
2042
2043 /* Although RSS is meaningless when there is a single HW queue we
2044 * still need it enabled in order to have HW Rx hash generated.
2045 *
2046 * if (!is_eth_multi(bp))
2047 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2048 */
2049
96305234 2050 params.rss_obj = rss_obj;
619c5cb6
VZ
2051
2052 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2053
60cad4e6
AE
2054 if (enable) {
2055 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2056
2057 /* RSS configuration */
2058 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2059 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2060 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2061 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2062 if (rss_obj->udp_rss_v4)
2063 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2064 if (rss_obj->udp_rss_v6)
2065 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2066 } else {
2067 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2068 }
619c5cb6 2069
96305234
DK
2070 /* Hash bits */
2071 params.rss_result_mask = MULTI_MASK;
619c5cb6 2072
5d317c6a 2073 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 2074
96305234
DK
2075 if (config_hash) {
2076 /* RSS keys */
60cad4e6 2077 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
96305234 2078 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
2079 }
2080
60cad4e6
AE
2081 if (IS_PF(bp))
2082 return bnx2x_config_rss(bp, &params);
2083 else
2084 return bnx2x_vfpf_config_rss(bp, &params);
619c5cb6
VZ
2085}
2086
1191cb83 2087static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2088{
3b603066 2089 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2090
2091 /* Prepare parameters for function state transitions */
2092 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2093
2094 func_params.f_obj = &bp->func_obj;
2095 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2096
2097 func_params.params.hw_init.load_phase = load_code;
2098
2099 return bnx2x_func_state_change(bp, &func_params);
2100}
2101
2102/*
 2103  * Cleans the objects that have internal lists without sending
16a5fd92 2104 * ramrods. Should be run when interrupts are disabled.
619c5cb6 2105 */
7fa6f340 2106void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2107{
2108 int rc;
2109 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2110 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2111 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2112
2113 /***************** Cleanup MACs' object first *************************/
2114
2115 /* Wait for completion of requested */
2116 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2117 /* Perform a dry cleanup */
2118 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2119
2120 /* Clean ETH primary MAC */
2121 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2122 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2123 &ramrod_flags);
2124 if (rc != 0)
2125 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2126
2127 /* Cleanup UC list */
2128 vlan_mac_flags = 0;
2129 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2130 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2131 &ramrod_flags);
2132 if (rc != 0)
2133 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2134
2135 /***************** Now clean mcast object *****************************/
2136 rparam.mcast_obj = &bp->mcast_obj;
2137 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2138
8b09be5f
YM
 2139 	/* Add a DEL command. Since we're doing a driver cleanup only,
2140 * we take a lock surrounding both the initial send and the CONTs,
2141 * as we don't want a true completion to disrupt us in the middle.
2142 */
2143 netif_addr_lock_bh(bp->dev);
619c5cb6
VZ
2144 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2145 if (rc < 0)
51c1a580
MS
2146 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2147 rc);
619c5cb6
VZ
2148
2149 /* ...and wait until all pending commands are cleared */
2150 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2151 while (rc != 0) {
2152 if (rc < 0) {
2153 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2154 rc);
8b09be5f 2155 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2156 return;
2157 }
2158
2159 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2160 }
8b09be5f 2161 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2162}
2163
2164#ifndef BNX2X_STOP_ON_ERROR
2165#define LOAD_ERROR_EXIT(bp, label) \
2166 do { \
2167 (bp)->state = BNX2X_STATE_ERROR; \
2168 goto label; \
2169 } while (0)
55c11941
MS
2170
2171#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2172 do { \
2173 bp->cnic_loaded = false; \
2174 goto label; \
2175 } while (0)
2176#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2177#define LOAD_ERROR_EXIT(bp, label) \
2178 do { \
2179 (bp)->state = BNX2X_STATE_ERROR; \
2180 (bp)->panic = 1; \
2181 return -EBUSY; \
2182 } while (0)
55c11941
MS
2183#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2184 do { \
2185 bp->cnic_loaded = false; \
2186 (bp)->panic = 1; \
2187 return -EBUSY; \
2188 } while (0)
2189#endif /*BNX2X_STOP_ON_ERROR*/
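/* With BNX2X_STOP_ON_ERROR defined, the macros above panic the driver
 * immediately instead of unwinding; the non-debug variants record the
 * error state and jump to the given cleanup label.
 */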
619c5cb6 2190
ad5afc89
AE
2191static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2192{
2193 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2194 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2195 return;
2196}
2197
2198static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2199{
8db573ba 2200 int num_groups, vf_headroom = 0;
ad5afc89 2201 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2202
ad5afc89
AE
2203 /* number of queues for statistics is number of eth queues + FCoE */
2204 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2205
ad5afc89
AE
2206 /* Total number of FW statistics requests =
 2207 	 * 1 for port stats + 1 for PF stats + potentially 2 for FCoE (FCoE proper
 2208 	 * and FCoE L2 queue) stats + num of queues (which includes another 1
2209 * for fcoe l2 queue if applicable)
2210 */
2211 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2212
8db573ba
AE
2213 /* vf stats appear in the request list, but their data is allocated by
2214 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2215 * it is used to determine where to place the vf stats queries in the
2216 * request struct
2217 */
2218 if (IS_SRIOV(bp))
6411280a 2219 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2220
ad5afc89
AE
2221 /* Request is built from stats_query_header and an array of
 2222 	 * stats_query_cmd_group, each of which contains
 2223 	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2224 * configured in the stats_query_header.
2225 */
2226 num_groups =
8db573ba
AE
2227 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2228 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2229 1 : 0));
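	/* i.e. num_groups = DIV_ROUND_UP(fw_stats_num + vf_headroom,
	 * STATS_QUERY_CMD_COUNT)
	 */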
2230
8db573ba
AE
2231 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2232 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2233 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2234 num_groups * sizeof(struct stats_query_cmd_group);
2235
2236 /* Data for statistics requests + stats_counter
2237 * stats_counter holds per-STORM counters that are incremented
2238 * when STORM has finished with the current request.
 2239 	 * memory for FCoE offloaded statistics is counted anyway,
 2240 	 * even if it will not be sent.
2241 * VF stats are not accounted for here as the data of VF stats is stored
2242 * in memory allocated by the VF, not here.
2243 */
2244 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2245 sizeof(struct per_pf_stats) +
2246 sizeof(struct fcoe_statistics_params) +
2247 sizeof(struct per_queue_stats) * num_queue_stats +
2248 sizeof(struct stats_counter);
2249
cd2b0389
JP
2250 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2251 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2252 if (!bp->fw_stats)
2253 goto alloc_mem_err;
ad5afc89
AE
2254
2255 /* Set shortcuts */
2256 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2257 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2258 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2259 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2260 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2261 bp->fw_stats_req_sz;
2262
6bf07b8e 2263 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
ad5afc89
AE
2264 U64_HI(bp->fw_stats_req_mapping),
2265 U64_LO(bp->fw_stats_req_mapping));
6bf07b8e 2266 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
ad5afc89
AE
2267 U64_HI(bp->fw_stats_data_mapping),
2268 U64_LO(bp->fw_stats_data_mapping));
2269 return 0;
2270
2271alloc_mem_err:
2272 bnx2x_free_fw_stats_mem(bp);
2273 BNX2X_ERR("Can't allocate FW stats memory\n");
2274 return -ENOMEM;
2275}
2276
2277/* send load request to mcp and analyze response */
2278static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2279{
178135c1
DK
2280 u32 param;
2281
ad5afc89
AE
2282 /* init fw_seq */
2283 bp->fw_seq =
2284 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2285 DRV_MSG_SEQ_NUMBER_MASK);
2286 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2287
2288 /* Get current FW pulse sequence */
2289 bp->fw_drv_pulse_wr_seq =
2290 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2291 DRV_PULSE_SEQ_MASK);
2292 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2293
178135c1
DK
2294 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2295
2296 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2297 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2298
ad5afc89 2299 /* load request */
178135c1 2300 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
ad5afc89
AE
2301
2302 /* if mcp fails to respond we must abort */
2303 if (!(*load_code)) {
2304 BNX2X_ERR("MCP response failure, aborting\n");
2305 return -EBUSY;
2306 }
2307
2308 /* If mcp refused (e.g. other port is in diagnostic mode) we
2309 * must abort
2310 */
2311 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2312 BNX2X_ERR("MCP refused load request, aborting\n");
2313 return -EBUSY;
2314 }
2315 return 0;
2316}
2317
 2318/* check whether another PF has already loaded FW to the chip. In
 2319 * virtualized environments a PF from another VM may have already
 2320 * initialized the device, including loading the FW.
2321 */
91ebb929 2322int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
ad5afc89
AE
2323{
2324 /* is another pf loaded on this engine? */
2325 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2326 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2327 /* build my FW version dword */
2328 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2329 (BCM_5710_FW_MINOR_VERSION << 8) +
2330 (BCM_5710_FW_REVISION_VERSION << 16) +
2331 (BCM_5710_FW_ENGINEERING_VERSION << 24);
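		/* e.g. (hypothetical version) FW 7.8.19.0 would be encoded as
		 * 0x00130807: major in the low byte, then minor, revision and
		 * engineering version in successively higher bytes
		 */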
2332
2333 /* read loaded FW from chip */
2334 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2335
2336 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2337 loaded_fw, my_fw);
2338
2339 /* abort nic load if version mismatch */
2340 if (my_fw != loaded_fw) {
91ebb929
YM
2341 if (print_err)
2342 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2343 loaded_fw, my_fw);
2344 else
2345 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2346 loaded_fw, my_fw);
ad5afc89
AE
2347 return -EBUSY;
2348 }
2349 }
2350 return 0;
2351}
2352
2353/* returns the "mcp load_code" according to global load_count array */
2354static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2355{
2356 int path = BP_PATH(bp);
2357
2358 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
a8f47eb7 2359 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2360 bnx2x_load_count[path][2]);
2361 bnx2x_load_count[path][0]++;
2362 bnx2x_load_count[path][1 + port]++;
ad5afc89 2363 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
a8f47eb7 2364 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2365 bnx2x_load_count[path][2]);
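	/* bnx2x_load_count[path] holds { total loads on path, loads on port 0,
	 * loads on port 1 }: the first function on the path gets COMMON, the
	 * first on its port gets PORT, everyone else gets FUNCTION
	 */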
2366 if (bnx2x_load_count[path][0] == 1)
ad5afc89 2367 return FW_MSG_CODE_DRV_LOAD_COMMON;
a8f47eb7 2368 else if (bnx2x_load_count[path][1 + port] == 1)
ad5afc89
AE
2369 return FW_MSG_CODE_DRV_LOAD_PORT;
2370 else
2371 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2372}
2373
2374/* mark PMF if applicable */
2375static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2376{
2377 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2378 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2379 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2380 bp->port.pmf = 1;
2381 /* We need the barrier to ensure the ordering between the
2382 * writing to bp->port.pmf here and reading it from the
2383 * bnx2x_periodic_task().
2384 */
2385 smp_mb();
2386 } else {
2387 bp->port.pmf = 0;
452427b0
YM
2388 }
2389
ad5afc89
AE
2390 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2391}
2392
2393static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2394{
2395 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2396 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2397 (bp->common.shmem2_base)) {
2398 if (SHMEM2_HAS(bp, dcc_support))
2399 SHMEM2_WR(bp, dcc_support,
2400 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2401 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2402 if (SHMEM2_HAS(bp, afex_driver_support))
2403 SHMEM2_WR(bp, afex_driver_support,
2404 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2405 }
2406
2407 /* Set AFEX default VLAN tag to an invalid value */
2408 bp->afex_def_vlan_tag = -1;
452427b0
YM
2409}
2410
1191cb83
ED
2411/**
2412 * bnx2x_bz_fp - zero content of the fastpath structure.
2413 *
2414 * @bp: driver handle
2415 * @index: fastpath index to be zeroed
2416 *
 2417  * Makes sure the contents of the bp->fp[index].napi are kept
2418 * intact.
2419 */
2420static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2421{
2422 struct bnx2x_fastpath *fp = &bp->fp[index];
65565884 2423 int cos;
1191cb83 2424 struct napi_struct orig_napi = fp->napi;
15192a8c 2425 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
d76a6111 2426
1191cb83 2427 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2428 if (fp->tpa_info)
2429 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2430 sizeof(struct bnx2x_agg_info));
2431 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2432
2433 /* Restore the NAPI object as it has been already initialized */
2434 fp->napi = orig_napi;
15192a8c 2435 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2436 fp->bp = bp;
2437 fp->index = index;
2438 if (IS_ETH_FP(fp))
2439 fp->max_cos = bp->max_cos;
2440 else
2441 /* Special queues support only one CoS */
2442 fp->max_cos = 1;
2443
65565884 2444 /* Init txdata pointers */
65565884
MS
2445 if (IS_FCOE_FP(fp))
2446 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2447 if (IS_ETH_FP(fp))
2448 for_each_cos_in_tx_queue(fp, cos)
2449 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2450 BNX2X_NUM_ETH_QUEUES(bp) + index];
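	/* e.g. (hypothetical) with 4 ETH queues, fp[1] points its CoS 0..2
	 * txdata at bnx2x_txq[1], [5] and [9] - the per-CoS rings are laid
	 * out in blocks of BNX2X_NUM_ETH_QUEUES(bp)
	 */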
2451
16a5fd92 2452 	/* set the TPA flag for each queue. The TPA flag determines the queue's
1191cb83
ED
 2453 	 * minimal size, so it must be set prior to queue memory allocation
2454 */
2455 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2456 (bp->flags & GRO_ENABLE_FLAG &&
2457 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2458 if (bp->flags & TPA_ENABLE_FLAG)
2459 fp->mode = TPA_MODE_LRO;
2460 else if (bp->flags & GRO_ENABLE_FLAG)
2461 fp->mode = TPA_MODE_GRO;
2462
1191cb83
ED
2463 /* We don't want TPA on an FCoE L2 ring */
2464 if (IS_FCOE_FP(fp))
2465 fp->disable_tpa = 1;
55c11941
MS
2466}
2467
2468int bnx2x_load_cnic(struct bnx2x *bp)
2469{
2470 int i, rc, port = BP_PORT(bp);
2471
2472 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2473
2474 mutex_init(&bp->cnic_mutex);
2475
ad5afc89
AE
2476 if (IS_PF(bp)) {
2477 rc = bnx2x_alloc_mem_cnic(bp);
2478 if (rc) {
2479 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2480 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2481 }
55c11941
MS
2482 }
2483
2484 rc = bnx2x_alloc_fp_mem_cnic(bp);
2485 if (rc) {
2486 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2487 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2488 }
2489
2490 /* Update the number of queues with the cnic queues */
2491 rc = bnx2x_set_real_num_queues(bp, 1);
2492 if (rc) {
2493 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2494 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2495 }
2496
2497 /* Add all CNIC NAPI objects */
2498 bnx2x_add_all_napi_cnic(bp);
2499 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2500 bnx2x_napi_enable_cnic(bp);
2501
2502 rc = bnx2x_init_hw_func_cnic(bp);
2503 if (rc)
2504 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2505
2506 bnx2x_nic_init_cnic(bp);
2507
ad5afc89
AE
2508 if (IS_PF(bp)) {
2509 /* Enable Timer scan */
2510 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2511
2512 /* setup cnic queues */
2513 for_each_cnic_queue(bp, i) {
2514 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2515 if (rc) {
2516 BNX2X_ERR("Queue setup failed\n");
2517 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2518 }
55c11941
MS
2519 }
2520 }
2521
2522 /* Initialize Rx filter. */
8b09be5f 2523 bnx2x_set_rx_mode_inner(bp);
55c11941
MS
2524
2525 /* re-read iscsi info */
2526 bnx2x_get_iscsi_info(bp);
2527 bnx2x_setup_cnic_irq_info(bp);
2528 bnx2x_setup_cnic_info(bp);
2529 bp->cnic_loaded = true;
2530 if (bp->state == BNX2X_STATE_OPEN)
2531 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2532
55c11941
MS
2533 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2534
2535 return 0;
2536
2537#ifndef BNX2X_STOP_ON_ERROR
2538load_error_cnic2:
2539 /* Disable Timer scan */
2540 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2541
2542load_error_cnic1:
2543 bnx2x_napi_disable_cnic(bp);
2544 /* Update the number of queues without the cnic queues */
d9d81862 2545 if (bnx2x_set_real_num_queues(bp, 0))
55c11941
MS
2546 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2547load_error_cnic0:
2548 BNX2X_ERR("CNIC-related load failed\n");
2549 bnx2x_free_fp_mem_cnic(bp);
2550 bnx2x_free_mem_cnic(bp);
2551 return rc;
2552#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2553}
2554
9f6c9258
DK
2555/* must be called with rtnl_lock */
2556int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2557{
619c5cb6 2558 int port = BP_PORT(bp);
ad5afc89 2559 int i, rc = 0, load_code = 0;
9f6c9258 2560
55c11941
MS
2561 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2562 DP(NETIF_MSG_IFUP,
2563 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2564
9f6c9258 2565#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2566 if (unlikely(bp->panic)) {
2567 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2568 return -EPERM;
51c1a580 2569 }
9f6c9258
DK
2570#endif
2571
2572 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2573
16a5fd92 2574 /* zero the structure w/o any lock, before SP handler is initialized */
2ae17f66
VZ
2575 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2576 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2577 &bp->last_reported_link.link_report_flags);
2ae17f66 2578
ad5afc89
AE
2579 if (IS_PF(bp))
2580 /* must be called before memory allocation and HW init */
2581 bnx2x_ilt_set_info(bp);
523224a3 2582
6383c0b3
AE
2583 /*
 2584 	 * Zero fastpath structures while preserving invariants such as the napi
 2585 	 * struct (allocated only once), the fp index, max_cos and the bp pointer.
65565884 2586 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2587 */
51c1a580 2588 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2589 for_each_queue(bp, i)
2590 bnx2x_bz_fp(bp, i);
55c11941
MS
2591 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2592 bp->num_cnic_queues) *
2593 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2594
55c11941 2595 bp->fcoe_init = false;
6383c0b3 2596
a8c94b91
VZ
2597 /* Set the receive queues buffer size */
2598 bnx2x_set_rx_buf_size(bp);
2599
ad5afc89
AE
2600 if (IS_PF(bp)) {
2601 rc = bnx2x_alloc_mem(bp);
2602 if (rc) {
2603 BNX2X_ERR("Unable to allocate bp memory\n");
2604 return rc;
2605 }
2606 }
2607
ad5afc89
AE
 2608 	/* needs to be done after alloc mem, since it's self-adjusting to the amount
2609 * of memory available for RSS queues
2610 */
2611 rc = bnx2x_alloc_fp_mem(bp);
2612 if (rc) {
2613 BNX2X_ERR("Unable to allocate memory for fps\n");
2614 LOAD_ERROR_EXIT(bp, load_error0);
2615 }
d6214d7a 2616
e3ed4eae
DK
 2617 	/* Allocate memory for FW statistics */
2618 if (bnx2x_alloc_fw_stats_mem(bp))
2619 LOAD_ERROR_EXIT(bp, load_error0);
2620
8d9ac297
AE
2621 /* request pf to initialize status blocks */
2622 if (IS_VF(bp)) {
2623 rc = bnx2x_vfpf_init(bp);
2624 if (rc)
2625 LOAD_ERROR_EXIT(bp, load_error0);
2626 }
2627
b3b83c3f
DK
 2628 	/* As long as bnx2x_alloc_mem() may update
2629 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2630 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2631 */
55c11941 2632 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2633 if (rc) {
ec6ba945 2634 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2635 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2636 }
2637
6383c0b3 2638 /* configure multi cos mappings in kernel.
16a5fd92
YM
2639 * this configuration may be overridden by a multi class queue
2640 * discipline or by a dcbx negotiation result.
6383c0b3
AE
2641 */
2642 bnx2x_setup_tc(bp->dev, bp->max_cos);
2643
26614ba5
MS
2644 /* Add all NAPI objects */
2645 bnx2x_add_all_napi(bp);
55c11941 2646 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2647 bnx2x_napi_enable(bp);
2648
ad5afc89
AE
2649 if (IS_PF(bp)) {
2650 /* set pf load just before approaching the MCP */
2651 bnx2x_set_pf_load(bp);
2652
2653 /* if mcp exists send load request and analyze response */
2654 if (!BP_NOMCP(bp)) {
2655 /* attempt to load pf */
2656 rc = bnx2x_nic_load_request(bp, &load_code);
2657 if (rc)
2658 LOAD_ERROR_EXIT(bp, load_error1);
2659
2660 /* what did mcp say? */
91ebb929 2661 rc = bnx2x_compare_fw_ver(bp, load_code, true);
ad5afc89
AE
2662 if (rc) {
2663 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2664 LOAD_ERROR_EXIT(bp, load_error2);
2665 }
ad5afc89
AE
2666 } else {
2667 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2668 }
9f6c9258 2669
ad5afc89
AE
2670 /* mark pmf if applicable */
2671 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2672
ad5afc89
AE
2673 /* Init Function state controlling object */
2674 bnx2x__init_func_obj(bp);
6383c0b3 2675
ad5afc89
AE
2676 /* Initialize HW */
2677 rc = bnx2x_init_hw(bp, load_code);
2678 if (rc) {
2679 BNX2X_ERR("HW init failed, aborting\n");
2680 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2681 LOAD_ERROR_EXIT(bp, load_error2);
2682 }
9f6c9258
DK
2683 }
2684
ecf01c22
YM
2685 bnx2x_pre_irq_nic_init(bp);
2686
d6214d7a
DK
2687 /* Connect to IRQs */
2688 rc = bnx2x_setup_irqs(bp);
523224a3 2689 if (rc) {
ad5afc89
AE
2690 BNX2X_ERR("setup irqs failed\n");
2691 if (IS_PF(bp))
2692 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2693 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2694 }
2695
619c5cb6 2696 /* Init per-function objects */
ad5afc89 2697 if (IS_PF(bp)) {
ecf01c22
YM
2698 /* Setup NIC internals and enable interrupts */
2699 bnx2x_post_irq_nic_init(bp, load_code);
2700
ad5afc89 2701 bnx2x_init_bp_objs(bp);
b56e9670 2702 bnx2x_iov_nic_init(bp);
a3348722 2703
ad5afc89
AE
2704 /* Set AFEX default VLAN tag to an invalid value */
2705 bp->afex_def_vlan_tag = -1;
2706 bnx2x_nic_load_afex_dcc(bp, load_code);
2707 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2708 rc = bnx2x_func_start(bp);
2709 if (rc) {
2710 BNX2X_ERR("Function start failed!\n");
2711 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2712
619c5cb6 2713 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2714 }
9f6c9258 2715
ad5afc89
AE
2716 /* Send LOAD_DONE command to MCP */
2717 if (!BP_NOMCP(bp)) {
2718 load_code = bnx2x_fw_command(bp,
2719 DRV_MSG_CODE_LOAD_DONE, 0);
2720 if (!load_code) {
2721 BNX2X_ERR("MCP response failure, aborting\n");
2722 rc = -EBUSY;
2723 LOAD_ERROR_EXIT(bp, load_error3);
2724 }
2725 }
9f6c9258 2726
0c14e5ce
AE
2727 /* initialize FW coalescing state machines in RAM */
2728 bnx2x_update_coalesce(bp);
60cad4e6 2729 }
0c14e5ce 2730
60cad4e6
AE
2731 /* setup the leading queue */
2732 rc = bnx2x_setup_leading(bp);
2733 if (rc) {
2734 BNX2X_ERR("Setup leading failed!\n");
2735 LOAD_ERROR_EXIT(bp, load_error3);
2736 }
ad5afc89 2737
60cad4e6
AE
2738 /* set up the rest of the queues */
2739 for_each_nondefault_eth_queue(bp, i) {
2740 if (IS_PF(bp))
2741 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2742 else /* VF */
2743 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
ad5afc89 2744 if (rc) {
60cad4e6 2745 BNX2X_ERR("Queue %d setup failed\n", i);
ad5afc89
AE
2746 LOAD_ERROR_EXIT(bp, load_error3);
2747 }
60cad4e6 2748 }
8d9ac297 2749
60cad4e6
AE
2750 /* setup rss */
2751 rc = bnx2x_init_rss(bp);
2752 if (rc) {
2753 BNX2X_ERR("PF RSS init failed\n");
2754 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2755 }
619c5cb6 2756
523224a3
DK
2757 /* Now when Clients are configured we are ready to work */
2758 bp->state = BNX2X_STATE_OPEN;
2759
619c5cb6 2760 /* Configure a ucast MAC */
ad5afc89
AE
2761 if (IS_PF(bp))
2762 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297 2763 else /* vf */
f8f4f61a
DK
2764 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2765 true);
51c1a580
MS
2766 if (rc) {
2767 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2768 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2769 }
6e30dd4e 2770
ad5afc89 2771 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2772 bnx2x_update_max_mf_config(bp, bp->pending_max);
2773 bp->pending_max = 0;
2774 }
2775
ad5afc89
AE
2776 if (bp->port.pmf) {
2777 rc = bnx2x_initial_phy_init(bp, load_mode);
2778 if (rc)
2779 LOAD_ERROR_EXIT(bp, load_error3);
2780 }
c63da990 2781 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2782
619c5cb6
VZ
2783 /* Start fast path */
2784
2785 /* Initialize Rx filter. */
8b09be5f 2786 bnx2x_set_rx_mode_inner(bp);
6e30dd4e 2787
619c5cb6 2788 /* Start the Tx */
9f6c9258
DK
2789 switch (load_mode) {
2790 case LOAD_NORMAL:
16a5fd92 2791 /* Tx queue should be only re-enabled */
523224a3 2792 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2793 break;
2794
2795 case LOAD_OPEN:
2796 netif_tx_start_all_queues(bp->dev);
4e857c58 2797 smp_mb__after_atomic();
9f6c9258
DK
2798 break;
2799
2800 case LOAD_DIAG:
8970b2e4 2801 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2802 bp->state = BNX2X_STATE_DIAG;
2803 break;
2804
2805 default:
2806 break;
2807 }
2808
00253a8c 2809 if (bp->port.pmf)
4c704899 2810 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2811 else
9f6c9258
DK
2812 bnx2x__link_status_update(bp);
2813
2814 /* start the timer */
2815 mod_timer(&bp->timer, jiffies + bp->current_interval);
2816
55c11941
MS
2817 if (CNIC_ENABLED(bp))
2818 bnx2x_load_cnic(bp);
9f6c9258 2819
42f8277f
YM
2820 if (IS_PF(bp))
2821 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2822
ad5afc89
AE
2823 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2824 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2825 u32 val;
2826 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2827 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2828 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2829 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2830 }
2831
619c5cb6 2832 /* Wait for all pending SP commands to complete */
ad5afc89 2833 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2834 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2835 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2836 return -EBUSY;
2837 }
6891dd25 2838
9876879f
BW
2839 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2840 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2841 bnx2x_dcbx_init(bp, false);
2842
55c11941
MS
2843 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2844
9f6c9258
DK
2845 return 0;
2846
619c5cb6 2847#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2848load_error3:
ad5afc89
AE
2849 if (IS_PF(bp)) {
2850 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2851
ad5afc89
AE
2852 /* Clean queueable objects */
2853 bnx2x_squeeze_objects(bp);
2854 }
619c5cb6 2855
9f6c9258
DK
2856 /* Free SKBs, SGEs, TPA pool and driver internals */
2857 bnx2x_free_skbs(bp);
ec6ba945 2858 for_each_rx_queue(bp, i)
9f6c9258 2859 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2860
9f6c9258 2861 /* Release IRQs */
d6214d7a
DK
2862 bnx2x_free_irq(bp);
2863load_error2:
ad5afc89 2864 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2865 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2866 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2867 }
2868
2869 bp->port.pmf = 0;
9f6c9258
DK
2870load_error1:
2871 bnx2x_napi_disable(bp);
722c6f58 2872 bnx2x_del_all_napi(bp);
ad5afc89 2873
889b9af3 2874 /* clear pf_load status, as it was already set */
ad5afc89
AE
2875 if (IS_PF(bp))
2876 bnx2x_clear_pf_load(bp);
d6214d7a 2877load_error0:
ad5afc89 2878 bnx2x_free_fw_stats_mem(bp);
e3ed4eae 2879 bnx2x_free_fp_mem(bp);
9f6c9258
DK
2880 bnx2x_free_mem(bp);
2881
2882 return rc;
619c5cb6 2883#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2884}
2885
7fa6f340 2886int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2887{
2888 u8 rc = 0, cos, i;
2889
2890 /* Wait until tx fastpath tasks complete */
2891 for_each_tx_queue(bp, i) {
2892 struct bnx2x_fastpath *fp = &bp->fp[i];
2893
2894 for_each_cos_in_tx_queue(fp, cos)
2895 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2896 if (rc)
2897 return rc;
2898 }
2899 return 0;
2900}
2901
9f6c9258 2902/* must be called with rtnl_lock */
5d07d868 2903int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2904{
2905 int i;
c9ee9206
VZ
2906 bool global = false;
2907
55c11941
MS
2908 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2909
9ce392d4 2910 /* mark driver is unloaded in shmem2 */
ad5afc89 2911 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2912 u32 val;
2913 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2914 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2915 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2916 }
2917
80bfe5cc 2918 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2919 (bp->state == BNX2X_STATE_CLOSED ||
2920 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2921 /* We can get here if the driver has been unloaded
2922 * during parity error recovery and is either waiting for a
2923 * leader to complete or for other functions to unload and
2924 * then ifdown has been issued. In this case we want to
 2925 		 * unload and let other functions complete a recovery
2926 * process.
2927 */
9f6c9258
DK
2928 bp->recovery_state = BNX2X_RECOVERY_DONE;
2929 bp->is_leader = 0;
c9ee9206
VZ
2930 bnx2x_release_leader_lock(bp);
2931 smp_mb();
2932
51c1a580
MS
2933 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2934 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2935 return -EINVAL;
2936 }
2937
80bfe5cc 2938 /* Nothing to do during unload if previous bnx2x_nic_load()
16a5fd92 2939 	 * has not completed successfully - all resources are released.
80bfe5cc
YM
2940 *
 2941 	 * We can get here only after an unsuccessful ndo_* callback, during which
 2942 	 * the dev->IFF_UP flag is still on.
2943 */
2944 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2945 return 0;
2946
2947 /* It's important to set the bp->state to the value different from
87b7ba3d
VZ
2948 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2949 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2950 */
2951 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2952 smp_mb();
2953
78c3bcc5
AE
2954 /* indicate to VFs that the PF is going down */
2955 bnx2x_iov_channel_down(bp);
2956
55c11941
MS
2957 if (CNIC_LOADED(bp))
2958 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2959
9505ee37
VZ
2960 /* Stop Tx */
2961 bnx2x_tx_disable(bp);
65565884 2962 netdev_reset_tc(bp->dev);
9505ee37 2963
9f6c9258 2964 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2965
9f6c9258 2966 del_timer_sync(&bp->timer);
f85582f8 2967
ad5afc89
AE
2968 if (IS_PF(bp)) {
2969 /* Set ALWAYS_ALIVE bit in shmem */
2970 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2971 bnx2x_drv_pulse(bp);
2972 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2973 bnx2x_save_statistics(bp);
2974 }
9f6c9258 2975
ad5afc89
AE
2976 /* wait till consumers catch up with producers in all queues */
2977 bnx2x_drain_tx_queues(bp);
9f6c9258 2978
9b176b6b
AE
 2979 	/* if VF, indicate to the PF that this function is going down (the PF will
 2980 	 * delete SP elements and clear initializations)
2981 */
2982 if (IS_VF(bp))
2983 bnx2x_vfpf_close_vf(bp);
2984 else if (unload_mode != UNLOAD_RECOVERY)
 2985 		/* if this is a normal/close unload, we need to clean up the chip */
5d07d868 2986 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2987 else {
c9ee9206
VZ
2988 /* Send the UNLOAD_REQUEST to the MCP */
2989 bnx2x_send_unload_req(bp, unload_mode);
2990
16a5fd92 2991 /* Prevent transactions to host from the functions on the
c9ee9206 2992 * engine that doesn't reset global blocks in case of global
16a5fd92 2993 * attention once global blocks are reset and gates are opened
c9ee9206
VZ
 2994 		 * (the engine whose leader will perform the recovery
 2995 		 * last).
2996 */
2997 if (!CHIP_IS_E1x(bp))
2998 bnx2x_pf_disable(bp);
2999
3000 /* Disable HW interrupts, NAPI */
523224a3 3001 bnx2x_netif_stop(bp, 1);
26614ba5
MS
3002 /* Delete all NAPI objects */
3003 bnx2x_del_all_napi(bp);
55c11941
MS
3004 if (CNIC_LOADED(bp))
3005 bnx2x_del_all_napi_cnic(bp);
523224a3 3006 /* Release IRQs */
d6214d7a 3007 bnx2x_free_irq(bp);
c9ee9206
VZ
3008
3009 /* Report UNLOAD_DONE to MCP */
5d07d868 3010 bnx2x_send_unload_done(bp, false);
523224a3 3011 }
9f6c9258 3012
619c5cb6 3013 /*
16a5fd92 3014 * At this stage no more interrupts will arrive so we may safely clean
619c5cb6
VZ
3015 * the queueable objects here in case they failed to get cleaned so far.
3016 */
ad5afc89
AE
3017 if (IS_PF(bp))
3018 bnx2x_squeeze_objects(bp);
619c5cb6 3019
79616895
VZ
3020 /* There should be no more pending SP commands at this stage */
3021 bp->sp_state = 0;
3022
9f6c9258
DK
3023 bp->port.pmf = 0;
3024
a0d307b2
DK
3025 /* clear pending work in rtnl task */
3026 bp->sp_rtnl_state = 0;
3027 smp_mb();
3028
9f6c9258
DK
3029 /* Free SKBs, SGEs, TPA pool and driver internals */
3030 bnx2x_free_skbs(bp);
55c11941
MS
3031 if (CNIC_LOADED(bp))
3032 bnx2x_free_skbs_cnic(bp);
ec6ba945 3033 for_each_rx_queue(bp, i)
9f6c9258 3034 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 3035
ad5afc89
AE
3036 bnx2x_free_fp_mem(bp);
3037 if (CNIC_LOADED(bp))
55c11941 3038 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 3039
ad5afc89 3040 if (IS_PF(bp)) {
ad5afc89
AE
3041 if (CNIC_LOADED(bp))
3042 bnx2x_free_mem_cnic(bp);
3043 }
b4cddbd6
AE
3044 bnx2x_free_mem(bp);
3045
9f6c9258 3046 bp->state = BNX2X_STATE_CLOSED;
55c11941 3047 bp->cnic_loaded = false;
9f6c9258 3048
42f8277f
YM
3049 /* Clear driver version indication in shmem */
3050 if (IS_PF(bp))
3051 bnx2x_update_mng_version(bp);
3052
c9ee9206
VZ
3053 /* Check if there are pending parity attentions. If there are - set
3054 * RECOVERY_IN_PROGRESS.
3055 */
ad5afc89 3056 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
3057 bnx2x_set_reset_in_progress(bp);
3058
3059 /* Set RESET_IS_GLOBAL if needed */
3060 if (global)
3061 bnx2x_set_reset_global(bp);
3062 }
3063
9f6c9258
DK
 3064 	/* The last driver must disable "close the gate" if there is no
3065 * parity attention or "process kill" pending.
3066 */
ad5afc89
AE
3067 if (IS_PF(bp) &&
3068 !bnx2x_clear_pf_load(bp) &&
3069 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
3070 bnx2x_disable_close_the_gate(bp);
3071
55c11941
MS
3072 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3073
9f6c9258
DK
3074 return 0;
3075}
f85582f8 3076
9f6c9258
DK
3077int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3078{
3079 u16 pmcsr;
3080
adf5f6a1 3081 /* If there is no power capability, silently succeed */
29ed74c3 3082 if (!bp->pdev->pm_cap) {
51c1a580 3083 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
3084 return 0;
3085 }
3086
29ed74c3 3087 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
9f6c9258
DK
3088
3089 switch (state) {
3090 case PCI_D0:
29ed74c3 3091 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3092 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3093 PCI_PM_CTRL_PME_STATUS));
3094
3095 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3096 /* delay required during transition out of D3hot */
3097 msleep(20);
3098 break;
3099
3100 case PCI_D3hot:
 3101 		/* If there are other clients above, don't
 3102 		   shut down the power */
3103 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3104 return 0;
3105 /* Don't shut down the power for emulation and FPGA */
3106 if (CHIP_REV_IS_SLOW(bp))
3107 return 0;
3108
3109 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3110 pmcsr |= 3;
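		/* 3 == D3hot in the PCI_PM_CTRL_STATE_MASK field */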
3111
3112 if (bp->wol)
3113 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3114
29ed74c3 3115 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3116 pmcsr);
3117
3118 /* No more memory access after this point until
3119 * device is brought back to D0.
3120 */
3121 break;
3122
3123 default:
51c1a580 3124 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3125 return -EINVAL;
3126 }
3127 return 0;
3128}
3129
9f6c9258
DK
3130/*
3131 * net_device service functions
3132 */
a8f47eb7 3133static int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3134{
3135 int work_done = 0;
6383c0b3 3136 u8 cos;
9f6c9258
DK
3137 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3138 napi);
3139 struct bnx2x *bp = fp->bp;
3140
3141 while (1) {
3142#ifdef BNX2X_STOP_ON_ERROR
3143 if (unlikely(bp->panic)) {
3144 napi_complete(napi);
3145 return 0;
3146 }
3147#endif
8f20aa57
DK
3148 if (!bnx2x_fp_lock_napi(fp))
3149 return work_done;
9f6c9258 3150
6383c0b3 3151 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3152 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3153 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3154
9f6c9258
DK
3155 if (bnx2x_has_rx_work(fp)) {
3156 work_done += bnx2x_rx_int(fp, budget - work_done);
3157
3158 /* must not complete if we consumed full budget */
8f20aa57
DK
3159 if (work_done >= budget) {
3160 bnx2x_fp_unlock_napi(fp);
9f6c9258 3161 break;
8f20aa57 3162 }
9f6c9258
DK
3163 }
3164
3165 /* Fall out from the NAPI loop if needed */
8f20aa57
DK
3166 if (!bnx2x_fp_unlock_napi(fp) &&
3167 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3168
ec6ba945
VZ
3169 /* No need to update SB for FCoE L2 ring as long as
3170 * it's connected to the default SB and the SB
3171 * has been updated when NAPI was scheduled.
3172 */
3173 if (IS_FCOE_FP(fp)) {
3174 napi_complete(napi);
3175 break;
3176 }
9f6c9258 3177 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3178 /* bnx2x_has_rx_work() reads the status block,
3179 * thus we need to ensure that status block indices
3180 * have been actually read (bnx2x_update_fpsb_idx)
3181 * prior to this check (bnx2x_has_rx_work) so that
3182 * we won't write the "newer" value of the status block
3183 * to IGU (if there was a DMA right after
3184 * bnx2x_has_rx_work and if there is no rmb, the memory
3185 * reading (bnx2x_update_fpsb_idx) may be postponed
3186 * to right before bnx2x_ack_sb). In this case there
3187 * will never be another interrupt until there is
3188 * another update of the status block, while there
3189 * is still unhandled work.
3190 */
9f6c9258
DK
3191 rmb();
3192
3193 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3194 napi_complete(napi);
3195 /* Re-enable interrupts */
51c1a580 3196 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3197 "Update index to %d\n", fp->fp_hc_idx);
3198 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3199 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3200 IGU_INT_ENABLE, 1);
3201 break;
3202 }
3203 }
3204 }
3205
3206 return work_done;
3207}
3208
e0d1095a 3209#ifdef CONFIG_NET_RX_BUSY_POLL
8f20aa57
DK
3210/* must be called with local_bh_disable()d */
3211int bnx2x_low_latency_recv(struct napi_struct *napi)
3212{
3213 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3214 napi);
3215 struct bnx2x *bp = fp->bp;
3216 int found = 0;
3217
3218 if ((bp->state == BNX2X_STATE_CLOSED) ||
3219 (bp->state == BNX2X_STATE_ERROR) ||
3220 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3221 return LL_FLUSH_FAILED;
3222
3223 if (!bnx2x_fp_lock_poll(fp))
3224 return LL_FLUSH_BUSY;
3225
75b29459 3226 if (bnx2x_has_rx_work(fp))
8f20aa57 3227 found = bnx2x_rx_int(fp, 4);
8f20aa57
DK
3228
3229 bnx2x_fp_unlock_poll(fp);
3230
3231 return found;
3232}
3233#endif
3234
9f6c9258
DK
 3235/* We split the first BD into headers and data BDs
 3236 * to ease the pain of our fellow microcode engineers;
 3237 * we use one mapping for both BDs.
9f6c9258 3238 */
91226790
DK
3239static u16 bnx2x_tx_split(struct bnx2x *bp,
3240 struct bnx2x_fp_txdata *txdata,
3241 struct sw_tx_bd *tx_buf,
3242 struct eth_tx_start_bd **tx_bd, u16 hlen,
3243 u16 bd_prod)
9f6c9258
DK
3244{
3245 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3246 struct eth_tx_bd *d_tx_bd;
3247 dma_addr_t mapping;
3248 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3249
3250 /* first fix first BD */
9f6c9258
DK
3251 h_tx_bd->nbytes = cpu_to_le16(hlen);
3252
91226790
DK
3253 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3254 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3255
3256 /* now get a new data BD
3257 * (after the pbd) and fill it */
3258 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3259 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3260
3261 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3262 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3263
3264 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3265 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3266 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3267
3268 /* this marks the BD as one that has no individual mapping */
3269 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3270
3271 DP(NETIF_MSG_TX_QUEUED,
3272 "TSO split data size is %d (%x:%x)\n",
3273 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3274
3275 /* update tx_bd */
3276 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3277
3278 return bd_prod;
3279}
3280
86564c3f
YM
3281#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3282#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3283static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3284{
86564c3f
YM
3285 __sum16 tsum = (__force __sum16) csum;
3286
9f6c9258 3287 if (fix > 0)
86564c3f
YM
3288 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3289 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3290
3291 else if (fix < 0)
86564c3f
YM
3292 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3293 csum_partial(t_header, -fix, 0)));
9f6c9258 3294
e2593fcd 3295 return bswab16(tsum);
9f6c9258
DK
3296}
3297
91226790 3298static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3299{
3300 u32 rc;
a848ade4
DK
3301 __u8 prot = 0;
3302 __be16 protocol;
9f6c9258
DK
3303
3304 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3305 return XMIT_PLAIN;
9f6c9258 3306
a848ade4
DK
3307 protocol = vlan_get_protocol(skb);
3308 if (protocol == htons(ETH_P_IPV6)) {
3309 rc = XMIT_CSUM_V6;
3310 prot = ipv6_hdr(skb)->nexthdr;
3311 } else {
3312 rc = XMIT_CSUM_V4;
3313 prot = ip_hdr(skb)->protocol;
3314 }
9f6c9258 3315
a848ade4
DK
3316 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3317 if (inner_ip_hdr(skb)->version == 6) {
3318 rc |= XMIT_CSUM_ENC_V6;
3319 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3320 rc |= XMIT_CSUM_TCP;
9f6c9258 3321 } else {
a848ade4
DK
3322 rc |= XMIT_CSUM_ENC_V4;
3323 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3324 rc |= XMIT_CSUM_TCP;
3325 }
3326 }
a848ade4
DK
3327 if (prot == IPPROTO_TCP)
3328 rc |= XMIT_CSUM_TCP;
9f6c9258 3329
36a8f39e
ED
3330 if (skb_is_gso(skb)) {
3331 if (skb_is_gso_v6(skb)) {
3332 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3333 if (rc & XMIT_CSUM_ENC)
3334 rc |= XMIT_GSO_ENC_V6;
3335 } else {
3336 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3337 if (rc & XMIT_CSUM_ENC)
3338 rc |= XMIT_GSO_ENC_V4;
3339 }
a848ade4 3340 }
9f6c9258
DK
3341
3342 return rc;
3343}
3344
3345#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 3346/* Check if the packet requires linearization (packet is too fragmented).
 3347   There is no need to check fragmentation if page size > 8K (there will be no
 3348   violation of FW restrictions). */
3349static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3350 u32 xmit_type)
3351{
3352 int to_copy = 0;
3353 int hlen = 0;
3354 int first_bd_sz = 0;
3355
3356 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3357 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3358
3359 if (xmit_type & XMIT_GSO) {
3360 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3361 /* Check if LSO packet needs to be copied:
3362 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3363 int wnd_size = MAX_FETCH_BD - 3;
3364 /* Number of windows to check */
3365 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3366 int wnd_idx = 0;
3367 int frag_idx = 0;
3368 u32 wnd_sum = 0;
3369
3370 /* Headers length */
3371 hlen = (int)(skb_transport_header(skb) - skb->data) +
3372 tcp_hdrlen(skb);
3373
3374 /* Amount of data (w/o headers) on linear part of SKB*/
3375 first_bd_sz = skb_headlen(skb) - hlen;
3376
3377 wnd_sum = first_bd_sz;
3378
3379 /* Calculate the first sum - it's special */
3380 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3381 wnd_sum +=
9e903e08 3382 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3383
3384 /* If there was data on linear skb data - check it */
3385 if (first_bd_sz > 0) {
3386 if (unlikely(wnd_sum < lso_mss)) {
3387 to_copy = 1;
3388 goto exit_lbl;
3389 }
3390
3391 wnd_sum -= first_bd_sz;
3392 }
3393
3394 /* Others are easier: run through the frag list and
3395 check all windows */
3396 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3397 wnd_sum +=
9e903e08 3398 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3399
3400 if (unlikely(wnd_sum < lso_mss)) {
3401 to_copy = 1;
3402 break;
3403 }
3404 wnd_sum -=
9e903e08 3405 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3406 }
3407 } else {
 3408 			/* a non-LSO packet that is too fragmented should always
 3409 			   be linearized */
3410 to_copy = 1;
3411 }
3412 }
3413
3414exit_lbl:
3415 if (unlikely(to_copy))
3416 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3417 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3418 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3419 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3420
3421 return to_copy;
3422}
3423#endif
3424
91226790
DK
3425static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3426 u32 xmit_type)
f2e0899f 3427{
a848ade4
DK
3428 struct ipv6hdr *ipv6;
3429
2297a2da
VZ
3430 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3431 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3432 ETH_TX_PARSE_BD_E2_LSO_MSS;
a848ade4
DK
3433
3434 if (xmit_type & XMIT_GSO_ENC_V6)
3435 ipv6 = inner_ipv6_hdr(skb);
3436 else if (xmit_type & XMIT_GSO_V6)
3437 ipv6 = ipv6_hdr(skb);
3438 else
3439 ipv6 = NULL;
3440
3441 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
2297a2da 3442 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3443}
3444
3445/**
e8920674 3446 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3447 *
e8920674
DK
3448 * @skb: packet skb
3449 * @pbd: parse BD
3450 * @xmit_type: xmit flags
f2e0899f 3451 */
91226790
DK
3452static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3453 struct eth_tx_parse_bd_e1x *pbd,
057cf65e 3454 struct eth_tx_start_bd *tx_start_bd,
91226790 3455 u32 xmit_type)
f2e0899f
DK
3456{
3457 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3458 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3459 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3460
3461 if (xmit_type & XMIT_GSO_V4) {
86564c3f 3462 pbd->ip_id = bswab16(ip_hdr(skb)->id);
f2e0899f 3463 pbd->tcp_pseudo_csum =
86564c3f
YM
3464 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3465 ip_hdr(skb)->daddr,
3466 0, IPPROTO_TCP, 0));
f2e0899f 3467
057cf65e
YM
3468 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3469 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3470 } else {
f2e0899f 3471 pbd->tcp_pseudo_csum =
86564c3f
YM
3472 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3473 &ipv6_hdr(skb)->daddr,
3474 0, IPPROTO_TCP, 0));
057cf65e 3475 }
f2e0899f 3476
86564c3f
YM
3477 pbd->global_data |=
3478 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
f2e0899f 3479}
f85582f8 3480
a848ade4
DK
3481/**
3482 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3483 *
3484 * @bp: driver handle
3485 * @skb: packet skb
3486 * @parsing_data: data to be updated
3487 * @xmit_type: xmit flags
3488 *
3489 * 57712/578xx related, when skb has encapsulation
3490 */
3491static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3492 u32 *parsing_data, u32 xmit_type)
3493{
3494 *parsing_data |=
3495 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3496 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3497 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3498
3499 if (xmit_type & XMIT_CSUM_TCP) {
3500 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3501 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3502 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3503
3504 return skb_inner_transport_header(skb) +
3505 inner_tcp_hdrlen(skb) - skb->data;
3506 }
3507
3508 /* We support checksum offload for TCP and UDP only.
3509 * No need to pass the UDP header length - it's a constant.
3510 */
3511 return skb_inner_transport_header(skb) +
3512 sizeof(struct udphdr) - skb->data;
3513}
3514
f2e0899f 3515/**
e8920674 3516 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3517 *
e8920674
DK
3518 * @bp: driver handle
3519 * @skb: packet skb
3520 * @parsing_data: data to be updated
3521 * @xmit_type: xmit flags
f2e0899f 3522 *
91226790 3523 * 57712/578xx related
f2e0899f 3524 */
91226790
DK
3525static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3526 u32 *parsing_data, u32 xmit_type)
f2e0899f 3527{
e39aece7 3528 *parsing_data |=
2de67439 3529 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
91226790
DK
3530 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3531 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
f2e0899f 3532
e39aece7
VZ
3533 if (xmit_type & XMIT_CSUM_TCP) {
3534 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3535 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3536 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3537
e39aece7 3538 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
924d75ab
YM
3539 }
3540 /* We support checksum offload for TCP and UDP only.
3541 * No need to pass the UDP header length - it's a constant.
3542 */
3543 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3544}
3545
a848ade4 3546/* set FW indication according to inner or outer protocols if tunneled */
91226790
DK
3547static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3548 struct eth_tx_start_bd *tx_start_bd,
3549 u32 xmit_type)
93ef5c02 3550{
93ef5c02
DK
3551 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3552
a848ade4 3553 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
91226790 3554 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
93ef5c02
DK
3555
3556 if (!(xmit_type & XMIT_CSUM_TCP))
3557 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3558}
3559
f2e0899f 3560/**
e8920674 3561 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3562 *
e8920674
DK
3563 * @bp: driver handle
3564 * @skb: packet skb
3565 * @pbd: parse BD to be updated
3566 * @xmit_type: xmit flags
f2e0899f 3567 */
91226790
DK
3568static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3569 struct eth_tx_parse_bd_e1x *pbd,
3570 u32 xmit_type)
f2e0899f 3571{
e39aece7 3572 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3573
3574 /* for now NS flag is not used in Linux */
3575 pbd->global_data =
86564c3f
YM
3576 cpu_to_le16(hlen |
3577 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3578 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
f2e0899f
DK
3579
3580 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3581 skb_network_header(skb)) >> 1;
f2e0899f 3582
e39aece7
VZ
3583 hlen += pbd->ip_hlen_w;
3584
3585 /* We support checksum offload for TCP and UDP only */
3586 if (xmit_type & XMIT_CSUM_TCP)
3587 hlen += tcp_hdrlen(skb) / 2;
3588 else
3589 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3590
3591 pbd->total_hlen_w = cpu_to_le16(hlen);
3592 hlen = hlen*2;
3593
3594 if (xmit_type & XMIT_CSUM_TCP) {
86564c3f 3595 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
f2e0899f
DK
3596
3597 } else {
3598 s8 fix = SKB_CS_OFF(skb); /* signed! */
3599
3600 DP(NETIF_MSG_TX_QUEUED,
3601 "hlen %d fix %d csum before fix %x\n",
3602 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3603
3604 /* HW bug: fixup the CSUM */
3605 pbd->tcp_pseudo_csum =
3606 bnx2x_csum_fix(skb_transport_header(skb),
3607 SKB_CS(skb), fix);
3608
3609 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3610 pbd->tcp_pseudo_csum);
3611 }
3612
3613 return hlen;
3614}
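/* The parse BDs filled in above carry offsets and header lengths in
 * 16-bit words rather than bytes - hence the ">> 1" shifts and the final
 * "hlen * 2" when a byte count is returned to the caller. A trivial
 * sketch of the two conversions (hypothetical helper names):
 */
static inline u16 __maybe_unused sketch_bytes_to_words(u16 bytes)
{
	return bytes >> 1;	/* parse BD fields are in 16-bit words */
}

static inline u16 __maybe_unused sketch_words_to_bytes(u16 words)
{
	return words << 1;	/* back to bytes, as returned to the caller */
}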
f85582f8 3615
a848ade4
DK
3616static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3617 struct eth_tx_parse_bd_e2 *pbd_e2,
3618 struct eth_tx_parse_2nd_bd *pbd2,
3619 u16 *global_data,
3620 u32 xmit_type)
3621{
e287a75c 3622 u16 hlen_w = 0;
a848ade4 3623 u8 outerip_off, outerip_len = 0;
e768fb29 3624
e287a75c
DK
3625 /* from outer IP to transport */
3626 hlen_w = (skb_inner_transport_header(skb) -
3627 skb_network_header(skb)) >> 1;
a848ade4
DK
3628
3629 /* transport len */
e768fb29 3630 hlen_w += inner_tcp_hdrlen(skb) >> 1;
a848ade4 3631
e287a75c 3632 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
a848ade4 3633
e768fb29
DK
3634 /* outer IP header info */
3635 if (xmit_type & XMIT_CSUM_V4) {
e287a75c 3636 struct iphdr *iph = ip_hdr(skb);
1b4fc0e2
DK
3637 u32 csum = (__force u32)(~iph->check) -
3638 (__force u32)iph->tot_len -
3639 (__force u32)iph->frag_off;
c957d09f 3640
a848ade4 3641 pbd2->fw_ip_csum_wo_len_flags_frag =
c957d09f 3642 bswab16(csum_fold((__force __wsum)csum));
a848ade4
DK
3643 } else {
3644 pbd2->fw_ip_hdr_to_payload_w =
e287a75c 3645 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
a848ade4
DK
3646 }
3647
3648 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3649
3650 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3651
3652 if (xmit_type & XMIT_GSO_V4) {
e287a75c 3653 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
a848ade4
DK
3654
3655 pbd_e2->data.tunnel_data.pseudo_csum =
3656 bswab16(~csum_tcpudp_magic(
3657 inner_ip_hdr(skb)->saddr,
3658 inner_ip_hdr(skb)->daddr,
3659 0, IPPROTO_TCP, 0));
3660
3661 outerip_len = ip_hdr(skb)->ihl << 1;
3662 } else {
3663 pbd_e2->data.tunnel_data.pseudo_csum =
3664 bswab16(~csum_ipv6_magic(
3665 &inner_ipv6_hdr(skb)->saddr,
3666 &inner_ipv6_hdr(skb)->daddr,
3667 0, IPPROTO_TCP, 0));
3668 }
3669
3670 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3671
3672 *global_data |=
3673 outerip_off |
3674 (!!(xmit_type & XMIT_CSUM_V6) <<
3675 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3676 (outerip_len <<
3677 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3678 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3679 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
65bc0cfe
DK
3680
3681 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3682 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3683 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3684 }
a848ade4
DK
3685}
3686
9f6c9258
DK
3687/* called with netif_tx_lock
3688 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3689 * netif_wake_queue()
3690 */
3691netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3692{
3693 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3694
9f6c9258 3695 struct netdev_queue *txq;
6383c0b3 3696 struct bnx2x_fp_txdata *txdata;
9f6c9258 3697 struct sw_tx_bd *tx_buf;
619c5cb6 3698 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3699 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3700 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3701 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
a848ade4 3702 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
2297a2da 3703 u32 pbd_e2_parsing_data = 0;
9f6c9258 3704 u16 pkt_prod, bd_prod;
65565884 3705 int nbd, txq_index;
9f6c9258
DK
3706 dma_addr_t mapping;
3707 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3708 int i;
3709 u8 hlen = 0;
3710 __le16 pkt_size = 0;
3711 struct ethhdr *eth;
3712 u8 mac_type = UNICAST_ADDRESS;
3713
3714#ifdef BNX2X_STOP_ON_ERROR
3715 if (unlikely(bp->panic))
3716 return NETDEV_TX_BUSY;
3717#endif
3718
6383c0b3
AE
3719 txq_index = skb_get_queue_mapping(skb);
3720 txq = netdev_get_tx_queue(dev, txq_index);
3721
55c11941 3722 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3723
65565884 3724 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3725
3726 /* enable this debug print to view the transmission queue being used
51c1a580 3727 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3728 txq_index, fp_index, txdata_index); */
9f6c9258 3729
16a5fd92 3730 /* enable this debug print to view the transmission details
51c1a580
MS
3731 DP(NETIF_MSG_TX_QUEUED,
3732 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3733 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3734
6383c0b3 3735 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3736 skb_shinfo(skb)->nr_frags +
3737 BDS_PER_TX_PKT +
3738 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3739 /* Handle special storage cases separately */
c96bdc0c
DK
3740 if (txdata->tx_ring_size == 0) {
3741 struct bnx2x_eth_q_stats *q_stats =
3742 bnx2x_fp_qstats(bp, txdata->parent_fp);
3743 q_stats->driver_filtered_tx_pkt++;
3744 dev_kfree_skb(skb);
3745 return NETDEV_TX_OK;
3746 }
2de67439
YM
3747 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3748 netif_tx_stop_queue(txq);
c96bdc0c 3749 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3750
9f6c9258
DK
3751 return NETDEV_TX_BUSY;
3752 }
3753
51c1a580 3754 DP(NETIF_MSG_TX_QUEUED,
04c46736 3755 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
6383c0b3 3756 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
04c46736
YM
3757 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3758 skb->len);
9f6c9258
DK
3759
3760 eth = (struct ethhdr *)skb->data;
3761
3762 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3763 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3764 if (is_broadcast_ether_addr(eth->h_dest))
3765 mac_type = BROADCAST_ADDRESS;
3766 else
3767 mac_type = MULTICAST_ADDRESS;
3768 }
3769
91226790 3770#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
9f6c9258
DK
3771 /* First, check if we need to linearize the skb (due to FW
3772 restrictions). No need to check fragmentation if page size > 8K
3773 (there will be no violation to FW restrictions) */
3774 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3775 /* Statistics of linearization */
3776 bp->lin_cnt++;
3777 if (skb_linearize(skb) != 0) {
51c1a580
MS
3778 DP(NETIF_MSG_TX_QUEUED,
3779 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3780 dev_kfree_skb_any(skb);
3781 return NETDEV_TX_OK;
3782 }
3783 }
3784#endif
619c5cb6
VZ
3785 /* Map skb linear data for DMA */
3786 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3787 skb_headlen(skb), DMA_TO_DEVICE);
3788 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3789 DP(NETIF_MSG_TX_QUEUED,
3790 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3791 dev_kfree_skb_any(skb);
3792 return NETDEV_TX_OK;
3793 }
9f6c9258
DK
3794 /*
3795 Please read carefully. First we use one BD which we mark as start,
3796 then we have a parsing info BD (used for TSO or xsum),
3797 and only then we have the rest of the TSO BDs.
3798 (don't forget to mark the last one as last,
3799 and to unmap only AFTER you write to the BD ...)
3800 And above all, all PBD sizes are in words - NOT DWORDS!
3801 */
3802
619c5cb6
VZ
3803 /* get current pkt produced now - advance it just before sending packet
3804 * since mapping of pages may fail and cause packet to be dropped
3805 */
6383c0b3
AE
3806 pkt_prod = txdata->tx_pkt_prod;
3807 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3808
619c5cb6
VZ
3809 /* get a tx_buf and first BD
3810 * tx_start_bd may be changed during SPLIT,
3811 * but first_bd will always stay first
3812 */
6383c0b3
AE
3813 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3814 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3815 first_bd = tx_start_bd;
9f6c9258
DK
3816
3817 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8 3818
91226790
DK
3819 /* header nbd: indirectly zero other flags! */
3820 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
9f6c9258
DK
3821
3822 /* remember the first BD of the packet */
6383c0b3 3823 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3824 tx_buf->skb = skb;
3825 tx_buf->flags = 0;
3826
3827 DP(NETIF_MSG_TX_QUEUED,
3828 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3829 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3830
eab6d18d 3831 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3832 tx_start_bd->vlan_or_ethertype =
3833 cpu_to_le16(vlan_tx_tag_get(skb));
3834 tx_start_bd->bd_flags.as_bitfield |=
3835 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
dc1ba591
AE
3836 } else {
3837 /* when transmitting in a vf, start bd must hold the ethertype
3838 * for fw to enforce it
3839 */
91226790 3840 if (IS_VF(bp))
dc1ba591
AE
3841 tx_start_bd->vlan_or_ethertype =
3842 cpu_to_le16(ntohs(eth->h_proto));
91226790 3843 else
dc1ba591
AE
3844 /* used by FW for packet accounting */
3845 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
dc1ba591 3846 }
9f6c9258 3847
91226790
DK
3848 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3849
9f6c9258
DK
3850 /* turn on parsing and get a BD */
3851 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3852
93ef5c02
DK
3853 if (xmit_type & XMIT_CSUM)
3854 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3855
619c5cb6 3856 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3857 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f 3858 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
a848ade4
DK
3859
3860 if (xmit_type & XMIT_CSUM_ENC) {
3861 u16 global_data = 0;
3862
3863 /* Set PBD in enc checksum offload case */
3864 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3865 &pbd_e2_parsing_data,
3866 xmit_type);
3867
3868 /* turn on 2nd parsing and get a BD */
3869 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3870
3871 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3872
3873 memset(pbd2, 0, sizeof(*pbd2));
3874
3875 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3876 (skb_inner_network_header(skb) -
3877 skb->data) >> 1;
3878
3879 if (xmit_type & XMIT_GSO_ENC)
3880 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3881 &global_data,
3882 xmit_type);
3883
3884 pbd2->global_data = cpu_to_le16(global_data);
3885
3886 /* add additional parse BD indication to start BD */
3887 SET_FLAG(tx_start_bd->general_data,
3888 ETH_TX_START_BD_PARSE_NBDS, 1);
3889 /* set encapsulation flag in start BD */
3890 SET_FLAG(tx_start_bd->general_data,
3891 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3892 nbd++;
3893 } else if (xmit_type & XMIT_CSUM) {
91226790 3894 /* Set PBD in checksum offload case w/o encapsulation */
2297a2da
VZ
3895 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3896 &pbd_e2_parsing_data,
3897 xmit_type);
a848ade4 3898 }
dc1ba591 3899
babe723d
YM
3900 /* Add the macs to the parsing BD if this is a vf or if
3901 * Tx Switching is enabled.
3902 */
91226790
DK
3903 if (IS_VF(bp)) {
3904 /* override GRE parameters in BD */
3905 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3906 &pbd_e2->data.mac_addr.src_mid,
3907 &pbd_e2->data.mac_addr.src_lo,
619c5cb6 3908 eth->h_source);
91226790 3909
babe723d
YM
3910 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3911 &pbd_e2->data.mac_addr.dst_mid,
3912 &pbd_e2->data.mac_addr.dst_lo,
3913 eth->h_dest);
3914 } else if (bp->flags & TX_SWITCHING) {
91226790
DK
3915 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3916 &pbd_e2->data.mac_addr.dst_mid,
3917 &pbd_e2->data.mac_addr.dst_lo,
619c5cb6
VZ
3918 eth->h_dest);
3919 }
96bed4b9
YM
3920
3921 SET_FLAG(pbd_e2_parsing_data,
3922 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3923 } else {
96bed4b9 3924 u16 global_data = 0;
6383c0b3 3925 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3926 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3927 /* Set PBD in checksum offload case */
3928 if (xmit_type & XMIT_CSUM)
3929 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3930
96bed4b9
YM
3931 SET_FLAG(global_data,
3932 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3933 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3934 }
3935
f85582f8 3936 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3937 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3938 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9f6c9258
DK
3939 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3940 pkt_size = tx_start_bd->nbytes;
3941
51c1a580 3942 DP(NETIF_MSG_TX_QUEUED,
91226790 3943 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
9f6c9258 3944 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
91226790 3945 le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3946 tx_start_bd->bd_flags.as_bitfield,
3947 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3948
3949 if (xmit_type & XMIT_GSO) {
3950
3951 DP(NETIF_MSG_TX_QUEUED,
3952 "TSO packet len %d hlen %d total len %d tso size %d\n",
3953 skb->len, hlen, skb_headlen(skb),
3954 skb_shinfo(skb)->gso_size);
3955
3956 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3957
91226790
DK
3958 if (unlikely(skb_headlen(skb) > hlen)) {
3959 nbd++;
6383c0b3
AE
3960 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3961 &tx_start_bd, hlen,
91226790
DK
3962 bd_prod);
3963 }
619c5cb6 3964 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3965 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3966 xmit_type);
f2e0899f 3967 else
44dbc78e 3968 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
9f6c9258 3969 }
2297a2da
VZ
3970
3971 /* Set the PBD's parsing_data field if not zero
3972 * (for the chips newer than 57711).
3973 */
3974 if (pbd_e2_parsing_data)
3975 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3976
9f6c9258
DK
3977 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3978
f85582f8 3979 /* Handle fragmented skb */
9f6c9258
DK
3980 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3981 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3982
9e903e08
ED
3983 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3984 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3985 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3986 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3987
51c1a580
MS
3988 DP(NETIF_MSG_TX_QUEUED,
3989 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3990
3991 /* we need unmap all buffers already mapped
3992 * for this SKB;
3993 * first_bd->nbd need to be properly updated
3994 * before call to bnx2x_free_tx_pkt
3995 */
3996 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3997 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3998 TX_BD(txdata->tx_pkt_prod),
3999 &pkts_compl, &bytes_compl);
619c5cb6
VZ
4000 return NETDEV_TX_OK;
4001 }
4002
9f6c9258 4003 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 4004 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 4005 if (total_pkt_bd == NULL)
6383c0b3 4006 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 4007
9f6c9258
DK
4008 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4009 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
4010 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4011 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 4012 nbd++;
9f6c9258
DK
4013
4014 DP(NETIF_MSG_TX_QUEUED,
4015 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4016 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4017 le16_to_cpu(tx_data_bd->nbytes));
4018 }
4019
4020 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4021
619c5cb6
VZ
4022 /* update with actual num BDs */
4023 first_bd->nbd = cpu_to_le16(nbd);
4024
9f6c9258
DK
4025 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4026
4027 /* now send a tx doorbell, counting the next BD
4028 * if the packet contains or ends with it
4029 */
4030 if (TX_BD_POFF(bd_prod) < nbd)
4031 nbd++;
4032
619c5cb6
VZ
4033 /* total_pkt_bytes should be set on the first data BD if
4034 * it's not an LSO packet and there is more than one
4035 * data BD. In this case pkt_size is limited by an MTU value.
4036 * However we prefer to set it for an LSO packet (while we don't
4037 * have to) in order to save some CPU cycles in the non-LSO
4038 * case, which we care about much more.
4039 */
9f6c9258
DK
4040 if (total_pkt_bd != NULL)
4041 total_pkt_bd->total_pkt_bytes = pkt_size;
4042
523224a3 4043 if (pbd_e1x)
9f6c9258 4044 DP(NETIF_MSG_TX_QUEUED,
51c1a580 4045 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
4046 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4047 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4048 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4049 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
4050 if (pbd_e2)
4051 DP(NETIF_MSG_TX_QUEUED,
4052 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
91226790
DK
4053 pbd_e2,
4054 pbd_e2->data.mac_addr.dst_hi,
4055 pbd_e2->data.mac_addr.dst_mid,
4056 pbd_e2->data.mac_addr.dst_lo,
4057 pbd_e2->data.mac_addr.src_hi,
4058 pbd_e2->data.mac_addr.src_mid,
4059 pbd_e2->data.mac_addr.src_lo,
f2e0899f 4060 pbd_e2->parsing_data);
9f6c9258
DK
4061 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4062
2df1a70a
TH
4063 netdev_tx_sent_queue(txq, skb->len);
4064
8373c57d
WB
4065 skb_tx_timestamp(skb);
4066
6383c0b3 4067 txdata->tx_pkt_prod++;
9f6c9258
DK
4068 /*
4069 * Make sure that the BD data is updated before updating the producer
4070 * since FW might read the BD right after the producer is updated.
4071 * This is only applicable for weak-ordered memory model archs such
4072 * as IA-64. The following barrier is also mandatory since the FW
4073 * assumes packets must have BDs.
4074 */
4075 wmb();
4076
6383c0b3 4077 txdata->tx_db.data.prod += nbd;
9f6c9258 4078 barrier();
f85582f8 4079
6383c0b3 4080 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
4081
4082 mmiowb();
4083
6383c0b3 4084 txdata->tx_bd_prod += nbd;
9f6c9258 4085
7df2dc6b 4086 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
4087 netif_tx_stop_queue(txq);
4088
4089 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4090 * ordering of set_bit() in netif_tx_stop_queue() and read of
4091 * fp->bd_tx_cons */
4092 smp_mb();
4093
15192a8c 4094 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 4095 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
4096 netif_tx_wake_queue(txq);
4097 }
6383c0b3 4098 txdata->tx_pkt++;
9f6c9258
DK
4099
4100 return NETDEV_TX_OK;
4101}
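/* A generic sketch of the producer-update ordering used at the end of
 * bnx2x_start_xmit() above; the ring layout below is made up, only the
 * ordering matters: the BD contents must be globally visible before the
 * producer/doorbell that tells the chip to fetch them, which is what the
 * wmb() between the two writes guarantees.
 */
struct sketch_tx_ring {
	u16 prod;		/* producer index the hardware reads */
	/* descriptor array, doorbell address, etc. would live here */
};

static void __maybe_unused sketch_tx_publish(struct sketch_tx_ring *ring,
					     u16 nbd)
{
	/* 1. the caller has already written the BDs for this packet */
	wmb();			/* 2. order BD writes before the producer */
	ring->prod += nbd;	/* 3. only now expose the new BDs to the HW */
}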
f85582f8 4102
6383c0b3
AE
4103/**
4104 * bnx2x_setup_tc - routine to configure net_device for multi tc
4105 *
4106 * @netdev: net device to configure
4107 * @tc: number of traffic classes to enable
4108 *
4109 * callback connected to the ndo_setup_tc function pointer
4110 */
4111int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4112{
4113 int cos, prio, count, offset;
4114 struct bnx2x *bp = netdev_priv(dev);
4115
4116 /* setup tc must be called under rtnl lock */
4117 ASSERT_RTNL();
4118
16a5fd92 4119 /* no traffic classes requested. Aborting */
6383c0b3
AE
4120 if (!num_tc) {
4121 netdev_reset_tc(dev);
4122 return 0;
4123 }
4124
4125 /* requested to support too many traffic classes */
4126 if (num_tc > bp->max_cos) {
6bf07b8e 4127 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
51c1a580 4128 num_tc, bp->max_cos);
6383c0b3
AE
4129 return -EINVAL;
4130 }
4131
4132 /* declare amount of supported traffic classes */
4133 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 4134 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
4135 return -EINVAL;
4136 }
4137
4138 /* configure priority to traffic class mapping */
4139 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4140 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
4141 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4142 "mapping priority %d to tc %d\n",
6383c0b3
AE
4143 prio, bp->prio_to_cos[prio]);
4144 }
4145
16a5fd92 4146 /* Use this configuration to differentiate tc0 from other COSes
6383c0b3
AE
4147 This can be used for ets or pfc, and saves the effort of setting
4148 up a multi-class queue disc or negotiating DCBX with a switch
4149 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 4150 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
4151 for (prio = 1; prio < 16; prio++) {
4152 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 4153 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
4154 } */
4155
4156 /* configure traffic class to transmission queue mapping */
4157 for (cos = 0; cos < bp->max_cos; cos++) {
4158 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 4159 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 4160 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
4161 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4162 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
4163 cos, offset, count);
4164 }
4165
4166 return 0;
4167}
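/* A worked example of the tc -> tx queue mapping set up above, with
 * illustrative numbers only (8 ethernet queues, no CNIC queue, so both
 * BNX2X_NUM_ETH_QUEUES() and BNX2X_NUM_NON_CNIC_QUEUES() evaluate to 8,
 * and max_cos = 3): each traffic class gets a block of 8 queues at
 * offset cos * 8, i.e.
 *
 *   tc0 -> queues 0..7, tc1 -> queues 8..15, tc2 -> queues 16..23
 *
 * so a skb classified to tc1 is hashed into the second block of rings.
 */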
4168
9f6c9258
DK
4169/* called with rtnl_lock */
4170int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4171{
4172 struct sockaddr *addr = p;
4173 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 4174 int rc = 0;
9f6c9258 4175
51c1a580
MS
4176 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4177 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 4178 return -EINVAL;
51c1a580 4179 }
614c76df 4180
a3348722
BW
4181 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4182 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 4183 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 4184 return -EINVAL;
51c1a580 4185 }
9f6c9258 4186
619c5cb6
VZ
4187 if (netif_running(dev)) {
4188 rc = bnx2x_set_eth_mac(bp, false);
4189 if (rc)
4190 return rc;
4191 }
4192
9f6c9258 4193 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 4194
523224a3 4195 if (netif_running(dev))
619c5cb6 4196 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 4197
619c5cb6 4198 return rc;
9f6c9258
DK
4199}
4200
b3b83c3f
DK
4201static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4202{
4203 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4204 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 4205 u8 cos;
b3b83c3f
DK
4206
4207 /* Common */
55c11941 4208
b3b83c3f
DK
4209 if (IS_FCOE_IDX(fp_index)) {
4210 memset(sb, 0, sizeof(union host_hc_status_block));
4211 fp->status_blk_mapping = 0;
b3b83c3f 4212 } else {
b3b83c3f 4213 /* status blocks */
619c5cb6 4214 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
4215 BNX2X_PCI_FREE(sb->e2_sb,
4216 bnx2x_fp(bp, fp_index,
4217 status_blk_mapping),
4218 sizeof(struct host_hc_status_block_e2));
4219 else
4220 BNX2X_PCI_FREE(sb->e1x_sb,
4221 bnx2x_fp(bp, fp_index,
4222 status_blk_mapping),
4223 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 4224 }
55c11941 4225
b3b83c3f
DK
4226 /* Rx */
4227 if (!skip_rx_queue(bp, fp_index)) {
4228 bnx2x_free_rx_bds(fp);
4229
4230 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4231 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4232 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4233 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4234 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4235
4236 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4237 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4238 sizeof(struct eth_fast_path_rx_cqe) *
4239 NUM_RCQ_BD);
4240
4241 /* SGE ring */
4242 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4243 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4244 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4245 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4246 }
4247
4248 /* Tx */
4249 if (!skip_tx_queue(bp, fp_index)) {
4250 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4251 for_each_cos_in_tx_queue(fp, cos) {
65565884 4252 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4253
51c1a580 4254 DP(NETIF_MSG_IFDOWN,
94f05b0f 4255 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
4256 fp_index, cos, txdata->cid);
4257
4258 BNX2X_FREE(txdata->tx_buf_ring);
4259 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4260 txdata->tx_desc_mapping,
4261 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4262 }
b3b83c3f
DK
4263 }
4264 /* end of fastpath */
4265}
4266
a8f47eb7 4267static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4268{
4269 int i;
4270 for_each_cnic_queue(bp, i)
4271 bnx2x_free_fp_mem_at(bp, i);
4272}
4273
b3b83c3f
DK
4274void bnx2x_free_fp_mem(struct bnx2x *bp)
4275{
4276 int i;
55c11941 4277 for_each_eth_queue(bp, i)
b3b83c3f
DK
4278 bnx2x_free_fp_mem_at(bp, i);
4279}
4280
1191cb83 4281static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
4282{
4283 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 4284 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
4285 bnx2x_fp(bp, index, sb_index_values) =
4286 (__le16 *)status_blk.e2_sb->sb.index_values;
4287 bnx2x_fp(bp, index, sb_running_index) =
4288 (__le16 *)status_blk.e2_sb->sb.running_index;
4289 } else {
4290 bnx2x_fp(bp, index, sb_index_values) =
4291 (__le16 *)status_blk.e1x_sb->sb.index_values;
4292 bnx2x_fp(bp, index, sb_running_index) =
4293 (__le16 *)status_blk.e1x_sb->sb.running_index;
4294 }
4295}
4296
1191cb83
ED
4297/* Returns the number of actually allocated BDs */
4298static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4299 int rx_ring_size)
4300{
4301 struct bnx2x *bp = fp->bp;
4302 u16 ring_prod, cqe_ring_prod;
4303 int i, failure_cnt = 0;
4304
4305 fp->rx_comp_cons = 0;
4306 cqe_ring_prod = ring_prod = 0;
4307
4308 /* This routine is called only during fp init so
4309 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4310 */
4311 for (i = 0; i < rx_ring_size; i++) {
996dedba 4312 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
1191cb83
ED
4313 failure_cnt++;
4314 continue;
4315 }
4316 ring_prod = NEXT_RX_IDX(ring_prod);
4317 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4318 WARN_ON(ring_prod <= (i - failure_cnt));
4319 }
4320
4321 if (failure_cnt)
4322 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4323 i - failure_cnt, fp->index);
4324
4325 fp->rx_bd_prod = ring_prod;
4326 /* Limit the CQE producer by the CQE ring size */
4327 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4328 cqe_ring_prod);
4329 fp->rx_pkt = fp->rx_calls = 0;
4330
15192a8c 4331 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
4332
4333 return i - failure_cnt;
4334}
4335
4336static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4337{
4338 int i;
4339
4340 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4341 struct eth_rx_cqe_next_page *nextpg;
4342
4343 nextpg = (struct eth_rx_cqe_next_page *)
4344 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4345 nextpg->addr_hi =
4346 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4347 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4348 nextpg->addr_lo =
4349 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4350 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4351 }
4352}
4353
b3b83c3f
DK
4354static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4355{
4356 union host_hc_status_block *sb;
4357 struct bnx2x_fastpath *fp = &bp->fp[index];
4358 int ring_size = 0;
6383c0b3 4359 u8 cos;
c2188952 4360 int rx_ring_size = 0;
b3b83c3f 4361
a3348722
BW
4362 if (!bp->rx_ring_size &&
4363 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
4364 rx_ring_size = MIN_RX_SIZE_NONTPA;
4365 bp->rx_ring_size = rx_ring_size;
55c11941 4366 } else if (!bp->rx_ring_size) {
c2188952
VZ
4367 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4368
065f8b92
YM
4369 if (CHIP_IS_E3(bp)) {
4370 u32 cfg = SHMEM_RD(bp,
4371 dev_info.port_hw_config[BP_PORT(bp)].
4372 default_cfg);
4373
4374 /* Decrease ring size for 1G functions */
4375 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4376 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4377 rx_ring_size /= 10;
4378 }
d760fc37 4379
c2188952
VZ
4380 /* allocate at least number of buffers required by FW */
4381 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4382 MIN_RX_SIZE_TPA, rx_ring_size);
4383
4384 bp->rx_ring_size = rx_ring_size;
614c76df 4385 } else /* if rx_ring_size specified - use it */
c2188952 4386 rx_ring_size = bp->rx_ring_size;
b3b83c3f 4387
04c46736
YM
4388 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4389
b3b83c3f
DK
4390 /* Common */
4391 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 4392
b3b83c3f 4393 if (!IS_FCOE_IDX(index)) {
b3b83c3f 4394 /* status blocks */
cd2b0389
JP
4395 if (!CHIP_IS_E1x(bp)) {
4396 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4397 sizeof(struct host_hc_status_block_e2));
4398 if (!sb->e2_sb)
4399 goto alloc_mem_err;
4400 } else {
4401 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4402 sizeof(struct host_hc_status_block_e1x));
4403 if (!sb->e1x_sb)
4404 goto alloc_mem_err;
4405 }
b3b83c3f 4406 }
8eef2af1
DK
4407
4408 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4409 * set shortcuts for it.
4410 */
4411 if (!IS_FCOE_IDX(index))
4412 set_sb_shortcuts(bp, index);
b3b83c3f
DK
4413
4414 /* Tx */
4415 if (!skip_tx_queue(bp, index)) {
4416 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 4417 for_each_cos_in_tx_queue(fp, cos) {
65565884 4418 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 4419
51c1a580
MS
4420 DP(NETIF_MSG_IFUP,
4421 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4422 index, cos);
4423
cd2b0389
JP
4424 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4425 sizeof(struct sw_tx_bd),
4426 GFP_KERNEL);
4427 if (!txdata->tx_buf_ring)
4428 goto alloc_mem_err;
4429 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4430 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4431 if (!txdata->tx_desc_ring)
4432 goto alloc_mem_err;
6383c0b3 4433 }
b3b83c3f
DK
4434 }
4435
4436 /* Rx */
4437 if (!skip_rx_queue(bp, index)) {
4438 /* fastpath rx rings: rx_buf rx_desc rx_comp */
cd2b0389
JP
4439 bnx2x_fp(bp, index, rx_buf_ring) =
4440 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4441 if (!bnx2x_fp(bp, index, rx_buf_ring))
4442 goto alloc_mem_err;
4443 bnx2x_fp(bp, index, rx_desc_ring) =
4444 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4445 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4446 if (!bnx2x_fp(bp, index, rx_desc_ring))
4447 goto alloc_mem_err;
b3b83c3f 4448
75b29459 4449 /* Seed all CQEs by 1s */
cd2b0389
JP
4450 bnx2x_fp(bp, index, rx_comp_ring) =
4451 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4452 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4453 if (!bnx2x_fp(bp, index, rx_comp_ring))
4454 goto alloc_mem_err;
b3b83c3f
DK
4455
4456 /* SGE ring */
cd2b0389
JP
4457 bnx2x_fp(bp, index, rx_page_ring) =
4458 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4459 GFP_KERNEL);
4460 if (!bnx2x_fp(bp, index, rx_page_ring))
4461 goto alloc_mem_err;
4462 bnx2x_fp(bp, index, rx_sge_ring) =
4463 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4464 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4465 if (!bnx2x_fp(bp, index, rx_sge_ring))
4466 goto alloc_mem_err;
b3b83c3f
DK
4467 /* RX BD ring */
4468 bnx2x_set_next_page_rx_bd(fp);
4469
4470 /* CQ ring */
4471 bnx2x_set_next_page_rx_cq(fp);
4472
4473 /* BDs */
4474 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4475 if (ring_size < rx_ring_size)
4476 goto alloc_mem_err;
4477 }
4478
4479 return 0;
4480
4481/* handles low memory cases */
4482alloc_mem_err:
4483 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4484 index, ring_size);
4485 /* FW will drop all packets if the queue is not big enough;
4486 * in these cases we disable the queue.
6383c0b3 4487 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
4488 */
4489 if (ring_size < (fp->disable_tpa ?
eb722d7a 4490 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4491 /* release memory allocated for this queue */
4492 bnx2x_free_fp_mem_at(bp, index);
4493 return -ENOMEM;
4494 }
4495 return 0;
4496}
4497
a8f47eb7 4498static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
55c11941
MS
4499{
4500 if (!NO_FCOE(bp))
4501 /* FCoE */
4502 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4503 /* we will fail load process instead of mark
4504 * NO_FCOE_FLAG
4505 */
4506 return -ENOMEM;
4507
4508 return 0;
4509}
4510
a8f47eb7 4511static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
b3b83c3f
DK
4512{
4513 int i;
4514
55c11941
MS
4515 /* 1. Allocate FP for leading - fatal if error
4516 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4517 */
4518
4519 /* leading */
4520 if (bnx2x_alloc_fp_mem_at(bp, 0))
4521 return -ENOMEM;
6383c0b3 4522
b3b83c3f
DK
4523 /* RSS */
4524 for_each_nondefault_eth_queue(bp, i)
4525 if (bnx2x_alloc_fp_mem_at(bp, i))
4526 break;
4527
4528 /* handle memory failures */
4529 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4530 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4531
4532 WARN_ON(delta < 0);
4864a16a 4533 bnx2x_shrink_eth_fp(bp, delta);
55c11941
MS
4534 if (CNIC_SUPPORT(bp))
4535 /* move non eth FPs next to last eth FP
4536 * must be done in that order
4537 * FCOE_IDX < FWD_IDX < OOO_IDX
4538 */
b3b83c3f 4539
55c11941
MS
4540 /* move FCoE fp even NO_FCOE_FLAG is on */
4541 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4542 bp->num_ethernet_queues -= delta;
4543 bp->num_queues = bp->num_ethernet_queues +
4544 bp->num_cnic_queues;
b3b83c3f
DK
4545 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4546 bp->num_queues + delta, bp->num_queues);
4547 }
4548
4549 return 0;
4550}
d6214d7a 4551
523224a3
DK
4552void bnx2x_free_mem_bp(struct bnx2x *bp)
4553{
c3146eb6
DK
4554 int i;
4555
4556 for (i = 0; i < bp->fp_array_size; i++)
4557 kfree(bp->fp[i].tpa_info);
523224a3 4558 kfree(bp->fp);
15192a8c
BW
4559 kfree(bp->sp_objs);
4560 kfree(bp->fp_stats);
65565884 4561 kfree(bp->bnx2x_txq);
523224a3
DK
4562 kfree(bp->msix_table);
4563 kfree(bp->ilt);
4564}
4565
0329aba1 4566int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4567{
4568 struct bnx2x_fastpath *fp;
4569 struct msix_entry *tbl;
4570 struct bnx2x_ilt *ilt;
6383c0b3 4571 int msix_table_size = 0;
55c11941 4572 int fp_array_size, txq_array_size;
15192a8c 4573 int i;
6383c0b3
AE
4574
4575 /*
4576 * The biggest MSI-X table we might need is the maximum number of fast
2de67439 4577 * path IGU SBs plus default SB (for PF only).
6383c0b3 4578 */
1ab4434c
AE
4579 msix_table_size = bp->igu_sb_cnt;
4580 if (IS_PF(bp))
4581 msix_table_size++;
4582 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4583
6383c0b3 4584 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4585 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
c3146eb6
DK
4586 bp->fp_array_size = fp_array_size;
4587 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
15192a8c 4588
c3146eb6 4589 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4590 if (!fp)
4591 goto alloc_err;
c3146eb6 4592 for (i = 0; i < bp->fp_array_size; i++) {
15192a8c
BW
4593 fp[i].tpa_info =
4594 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4595 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4596 if (!(fp[i].tpa_info))
4597 goto alloc_err;
4598 }
4599
523224a3
DK
4600 bp->fp = fp;
4601
15192a8c 4602 /* allocate sp objs */
c3146eb6 4603 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
15192a8c
BW
4604 GFP_KERNEL);
4605 if (!bp->sp_objs)
4606 goto alloc_err;
4607
4608 /* allocate fp_stats */
c3146eb6 4609 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
15192a8c
BW
4610 GFP_KERNEL);
4611 if (!bp->fp_stats)
4612 goto alloc_err;
4613
65565884 4614 /* Allocate memory for the transmission queues array */
55c11941
MS
4615 txq_array_size =
4616 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4617 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4618
4619 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4620 GFP_KERNEL);
65565884
MS
4621 if (!bp->bnx2x_txq)
4622 goto alloc_err;
4623
523224a3 4624 /* msix table */
01e23742 4625 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4626 if (!tbl)
4627 goto alloc_err;
4628 bp->msix_table = tbl;
4629
4630 /* ilt */
4631 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4632 if (!ilt)
4633 goto alloc_err;
4634 bp->ilt = ilt;
4635
4636 return 0;
4637alloc_err:
4638 bnx2x_free_mem_bp(bp);
4639 return -ENOMEM;
523224a3
DK
4640}
4641
a9fccec7 4642int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4643{
4644 struct bnx2x *bp = netdev_priv(dev);
4645
4646 if (unlikely(!netif_running(dev)))
4647 return 0;
4648
5d07d868 4649 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4650 return bnx2x_nic_load(bp, LOAD_NORMAL);
4651}
4652
1ac9e428
YR
4653int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4654{
4655 u32 sel_phy_idx = 0;
4656 if (bp->link_params.num_phys <= 1)
4657 return INT_PHY;
4658
4659 if (bp->link_vars.link_up) {
4660 sel_phy_idx = EXT_PHY1;
4661 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4662 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4663 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4664 sel_phy_idx = EXT_PHY2;
4665 } else {
4666
4667 switch (bnx2x_phy_selection(&bp->link_params)) {
4668 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4669 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4670 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4671 sel_phy_idx = EXT_PHY1;
4672 break;
4673 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4674 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4675 sel_phy_idx = EXT_PHY2;
4676 break;
4677 }
4678 }
4679
4680 return sel_phy_idx;
1ac9e428
YR
4681}
4682int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4683{
4684 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4685 /*
2de67439 4686 * The selected active PHY is always the one after swapping (in case PHY
1ac9e428
YR
4687 * swapping is enabled). So when swapping is enabled, we need to reverse
4688 * the configuration
4689 */
4690
4691 if (bp->link_params.multi_phy_config &
4692 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4693 if (sel_phy_idx == EXT_PHY1)
4694 sel_phy_idx = EXT_PHY2;
4695 else if (sel_phy_idx == EXT_PHY2)
4696 sel_phy_idx = EXT_PHY1;
4697 }
4698 return LINK_CONFIG_IDX(sel_phy_idx);
4699}
4700
55c11941 4701#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4702int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4703{
4704 struct bnx2x *bp = netdev_priv(dev);
4705 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4706
4707 switch (type) {
4708 case NETDEV_FCOE_WWNN:
4709 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4710 cp->fcoe_wwn_node_name_lo);
4711 break;
4712 case NETDEV_FCOE_WWPN:
4713 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4714 cp->fcoe_wwn_port_name_lo);
4715 break;
4716 default:
51c1a580 4717 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4718 return -EINVAL;
4719 }
4720
4721 return 0;
4722}
4723#endif
4724
9f6c9258
DK
4725/* called with rtnl_lock */
4726int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4727{
4728 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4729
4730 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4731 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4732 return -EAGAIN;
4733 }
4734
4735 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4736 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4737 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4738 return -EINVAL;
51c1a580 4739 }
9f6c9258
DK
4740
4741 /* This does not race with packet allocation
4742 * because the actual alloc size is
4743 * only updated as part of load
4744 */
4745 dev->mtu = new_mtu;
4746
66371c44
MM
4747 return bnx2x_reload_if_running(dev);
4748}
4749
c8f44aff 4750netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4751 netdev_features_t features)
66371c44
MM
4752{
4753 struct bnx2x *bp = netdev_priv(dev);
4754
4755 /* TPA requires Rx CSUM offloading */
621b4d66 4756 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4757 features &= ~NETIF_F_LRO;
621b4d66
DK
4758 features &= ~NETIF_F_GRO;
4759 }
66371c44
MM
4760
4761 return features;
4762}
4763
c8f44aff 4764int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4765{
4766 struct bnx2x *bp = netdev_priv(dev);
4767 u32 flags = bp->flags;
8802f579 4768 u32 changes;
538dd2e3 4769 bool bnx2x_reload = false;
66371c44
MM
4770
4771 if (features & NETIF_F_LRO)
4772 flags |= TPA_ENABLE_FLAG;
4773 else
4774 flags &= ~TPA_ENABLE_FLAG;
4775
621b4d66
DK
4776 if (features & NETIF_F_GRO)
4777 flags |= GRO_ENABLE_FLAG;
4778 else
4779 flags &= ~GRO_ENABLE_FLAG;
4780
538dd2e3
MB
4781 if (features & NETIF_F_LOOPBACK) {
4782 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4783 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4784 bnx2x_reload = true;
4785 }
4786 } else {
4787 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4788 bp->link_params.loopback_mode = LOOPBACK_NONE;
4789 bnx2x_reload = true;
4790 }
4791 }
4792
8802f579
ED
4793 changes = flags ^ bp->flags;
4794
16a5fd92 4795 /* if GRO is changed while LRO is enabled, don't force a reload */
8802f579
ED
4796 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4797 changes &= ~GRO_ENABLE_FLAG;
4798
4799 if (changes)
538dd2e3 4800 bnx2x_reload = true;
8802f579
ED
4801
4802 bp->flags = flags;
66371c44 4803
538dd2e3 4804 if (bnx2x_reload) {
66371c44
MM
4805 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4806 return bnx2x_reload_if_running(dev);
4807 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4808 }
4809
66371c44 4810 return 0;
9f6c9258
DK
4811}
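/* A small sketch of the feature-delta logic above, using made-up bit
 * values (0x1 for GRO, 0x2 for TPA/LRO) purely for illustration: a GRO
 * toggle is masked out while LRO stays enabled, so it does not force a
 * full reload of the NIC.
 */
static bool __maybe_unused sketch_features_need_reload(u32 old_flags,
						       u32 new_flags)
{
	u32 changes = old_flags ^ new_flags;

	if ((changes & 0x1 /* GRO */) && (new_flags & 0x2 /* TPA */))
		changes &= ~0x1;	/* GRO hidden behind LRO - no reload */

	return changes != 0;
}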
4812
4813void bnx2x_tx_timeout(struct net_device *dev)
4814{
4815 struct bnx2x *bp = netdev_priv(dev);
4816
4817#ifdef BNX2X_STOP_ON_ERROR
4818 if (!bp->panic)
4819 bnx2x_panic();
4820#endif
7be08a72 4821
9f6c9258 4822 /* This allows the netif to be shutdown gracefully before resetting */
230bb0f3 4823 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
9f6c9258
DK
4824}
4825
9f6c9258
DK
4826int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4827{
4828 struct net_device *dev = pci_get_drvdata(pdev);
4829 struct bnx2x *bp;
4830
4831 if (!dev) {
4832 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4833 return -ENODEV;
4834 }
4835 bp = netdev_priv(dev);
4836
4837 rtnl_lock();
4838
4839 pci_save_state(pdev);
4840
4841 if (!netif_running(dev)) {
4842 rtnl_unlock();
4843 return 0;
4844 }
4845
4846 netif_device_detach(dev);
4847
5d07d868 4848 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4849
4850 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4851
4852 rtnl_unlock();
4853
4854 return 0;
4855}
4856
4857int bnx2x_resume(struct pci_dev *pdev)
4858{
4859 struct net_device *dev = pci_get_drvdata(pdev);
4860 struct bnx2x *bp;
4861 int rc;
4862
4863 if (!dev) {
4864 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4865 return -ENODEV;
4866 }
4867 bp = netdev_priv(dev);
4868
4869 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4870 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4871 return -EAGAIN;
4872 }
4873
4874 rtnl_lock();
4875
4876 pci_restore_state(pdev);
4877
4878 if (!netif_running(dev)) {
4879 rtnl_unlock();
4880 return 0;
4881 }
4882
4883 bnx2x_set_power_state(bp, PCI_D0);
4884 netif_device_attach(dev);
4885
4886 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4887
4888 rtnl_unlock();
4889
4890 return rc;
4891}
619c5cb6 4892
619c5cb6
VZ
4893void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4894 u32 cid)
4895{
b9871bcf
AE
4896 if (!cxt) {
4897 BNX2X_ERR("bad context pointer %p\n", cxt);
4898 return;
4899 }
4900
619c5cb6
VZ
4901 /* ustorm cxt validation */
4902 cxt->ustorm_ag_context.cdu_usage =
4903 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4904 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4905 /* xcontext validation */
4906 cxt->xstorm_ag_context.cdu_reserved =
4907 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4908 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4909}
4910
1191cb83
ED
4911static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4912 u8 fw_sb_id, u8 sb_index,
4913 u8 ticks)
619c5cb6 4914{
619c5cb6
VZ
4915 u32 addr = BAR_CSTRORM_INTMEM +
4916 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4917 REG_WR8(bp, addr, ticks);
51c1a580
MS
4918 DP(NETIF_MSG_IFUP,
4919 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4920 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4921}
4922
1191cb83
ED
4923static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4924 u16 fw_sb_id, u8 sb_index,
4925 u8 disable)
619c5cb6
VZ
4926{
4927 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4928 u32 addr = BAR_CSTRORM_INTMEM +
4929 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
0c14e5ce 4930 u8 flags = REG_RD8(bp, addr);
619c5cb6
VZ
4931 /* clear and set */
4932 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4933 flags |= enable_flag;
0c14e5ce 4934 REG_WR8(bp, addr, flags);
51c1a580
MS
4935 DP(NETIF_MSG_IFUP,
4936 "port %x fw_sb_id %d sb_index %d disable %d\n",
4937 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4938}
4939
4940void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4941 u8 sb_index, u8 disable, u16 usec)
4942{
4943 int port = BP_PORT(bp);
4944 u8 ticks = usec / BNX2X_BTR;
4945
4946 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4947
4948 disable = disable ? 1 : (usec ? 0 : 1);
4949 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4950}
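/* Worked example for the conversion above, assuming BNX2X_BTR is 4
 * (microseconds per coalescing tick): usec = 48 gives ticks = 12. The
 * status-block index is then disabled either when the caller asked for
 * it explicitly or when usec == 0, which is exactly what the
 * "disable ? 1 : (usec ? 0 : 1)" expression encodes.
 */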
230bb0f3
YM
4951
4952void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
4953 u32 verbose)
4954{
4e857c58 4955 smp_mb__before_atomic();
230bb0f3 4956 set_bit(flag, &bp->sp_rtnl_state);
4e857c58 4957 smp_mb__after_atomic();
230bb0f3
YM
4958 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
4959 flag);
4960 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4961}
4962EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);