/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_rx_queue_cnic(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_eth_queue(bp, i) {
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&bnx2x_fp(bp, i, napi));
	}
}

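/* Use bnx2x_num_queues when it is non-zero, otherwise the stack's default
 * RSS queue count; in both cases cap the result at BNX2X_MAX_QUEUES(bp).
 */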
static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	return bnx2x_num_queues ?
		 min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) :
		 min_t(int, netif_get_num_default_rss_queues(),
		       BNX2X_MAX_QUEUES(bp));
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;
	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Retain the tpa_info of the original `to' version as we don't want
	 * 2 FPs to contain the same tpa_info pointer.
	 */
	to_fp->tpa_info = old_tpa_info;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:		driver handle
 * @buf:	character buffer to fill with the fw name
 * @buf_len:	length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:		driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;
	u16 split_bd_len = 0;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* unmap first bd */
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
			 DMA_TO_DEVICE);

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    enum pkt_hash_types *rxhash_type)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
				(htype == TCP_IPV6_HASH_TYPE)) ?
			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;

		return le32_to_cpu(cqe->rss_hash_result);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is, it will always be
	 * 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

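/* Allocate a page for the SGE ring entry at @index and DMA-map it for device
 * writes; on mapping failure the page is freed and -ENOMEM is returned.
 */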
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      u16 index, gfp_t gfp_mask)
{
	struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
	if (fp->rx_frag_size) {
		/* GFP_KERNEL allocations are used only during initialization */
		if (unlikely(gfp_mask & __GFP_WAIT))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fp->rx_frag_size);
	}

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			   void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			      struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 index, gfp_t gfp_mask)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp, gfp_mask);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

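/* Mark the skb CHECKSUM_UNNECESSARY when the HW reported a valid L4 checksum;
 * count a hw_csum_err when validation was done but an error was flagged.
 */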
static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
			 struct bnx2x_fastpath *fp,
			 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		u32 rxhash;
		enum pkt_hash_types rxhash_type;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x err %x status %x queue %x vlan %x len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR flags %x rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
						       GFP_ATOMIC) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a none-LRO skb */
		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
		skb_set_hash(skb, rxhash, rxhash_type);

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		skb_mark_napi_id(skb, &fp->napi);

		if (bnx2x_fp_ll_polling(fp))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report a new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data =
					bnx2x_frag_alloc(fp, GFP_KERNEL);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
						       GFP_KERNEL) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
{
	u8 cos;
	struct bnx2x *bp = fp->bp;

	for_each_cos_in_tx_queue(fp, cos) {
		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
		unsigned pkts_compl = 0, bytes_compl = 0;

		u16 sw_prod = txdata->tx_pkt_prod;
		u16 sw_cons = txdata->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
					  &pkts_compl, &bytes_compl);
			sw_cons++;
		}

		netdev_tx_reset_queue(
			netdev_get_tx_queue(bp->dev,
					    txdata->txq_index));
	}
}

static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue_cnic(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
	}
}

static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		u8 *data = rx_buf->data;

		if (data == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		bnx2x_frag_free(fp, data);
	}
}

static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		bnx2x_free_rx_bds(&bp->fp[j]);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
	}
}

static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs_cnic(bp);
	bnx2x_free_rx_skbs_cnic(bp);
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;

	/* VFs don't have a default SB */
	if (IS_PF(bp)) {
		free_irq(bp->msix_table[offset].vector, bp->dev);
		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
		   bp->msix_table[offset].vector);
		offset++;
	}

	if (CNIC_SUPPORT(bp)) {
		if (nvecs == offset)
			return;
		offset++;
	}

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
		   i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);

		/* vfs don't have a default status block */
		if (IS_PF(bp))
			nvecs++;

		bnx2x_free_msix_irqs(bp, nvecs);
	} else {
		free_irq(bp->dev->irq, bp->dev);
	}
}

int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc;

	/* VFs don't have a default status block */
	if (IS_PF(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
			       bp->msix_table[0].entry);
		msix_vec++;
	}

	/* Cnic requires an msix vector for itself */
	if (CNIC_SUPPORT(bp)) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
			       msix_vec, bp->msix_table[msix_vec].entry);
		msix_vec++;
	}

	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
			       msix_vec, msix_vec, i);
		msix_vec++;
	}

	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
	   msix_vec);

	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc == -ENOSPC) {
		/* Get by with single vector */
		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
		if (rc < 0) {
			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
				       rc);
			goto no_msix;
		}

		BNX2X_DEV_INFO("Using single MSI-X vector\n");
		bp->flags |= USING_SINGLE_MSIX_FLAG;

		BNX2X_DEV_INFO("set number of queues to 1\n");
		bp->num_ethernet_queues = 1;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	} else if (rc < 0) {
		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
		goto no_msix;
	} else if (rc < msix_vec) {
		/* how many fewer vectors will we have? */
		int diff = msix_vec - rc;

		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);

		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;

		BNX2X_DEV_INFO("New queue configuration set: %d\n",
			       bp->num_queues);
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;

no_msix:
	/* fall back to INTx if not enough memory */
	if (rc == -ENOMEM)
		bp->flags |= DISABLE_MSI_FLAG;

	return rc;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	/* no default status block for vf */
	if (IS_PF(bp)) {
		rc = request_irq(bp->msix_table[offset++].vector,
				 bnx2x_msix_sp_int, 0,
				 bp->dev->name, bp->dev);
		if (rc) {
			BNX2X_ERR("request sp irq failed\n");
			return -EBUSY;
		}
	}

	if (CNIC_SUPPORT(bp))
		offset++;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
				  bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	if (IS_PF(bp)) {
		offset = 1 + CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
			    bp->msix_table[0].vector,
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	} else {
		offset = CNIC_SUPPORT(bp);
		netdev_info(bp->dev,
			    "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
			    0, bp->msix_table[offset].vector,
			    i - 1, bp->msix_table[offset + i - 1].vector);
	}
	return 0;
}

int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		BNX2X_DEV_INFO("MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	unsigned int irq;

	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
		flags = 0;
	else
		flags = IRQF_SHARED;

	if (bp->flags & USING_MSIX_FLAG)
		irq = bp->msix_table[0].vector;
	else
		irq = bp->pdev->irq;

	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
}

static int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;
	if (bp->flags & USING_MSIX_FLAG &&
	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->dev->irq);
		}
		if (bp->flags & USING_MSIX_FLAG) {
			bp->dev->irq = bp->msix_table[0].vector;
			netdev_info(bp->dev, "using MSIX IRQ %d\n",
				    bp->dev->irq);
		}
	}

	return 0;
}

static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		bnx2x_fp_init_lock(&bp->fp[i]);
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		bnx2x_fp_init_lock(&bp->fp[i]);
		napi_enable(&bnx2x_fp(bp, i, napi));
	}
}

static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
			usleep_range(1000, 2000);
	}
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		napi_disable(&bnx2x_fp(bp, i, napi));
		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
			usleep_range(1000, 2000);
	}
}

void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		if (CNIC_LOADED(bp))
			bnx2x_napi_enable_cnic(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	if (CNIC_LOADED(bp))
		bnx2x_napi_disable_cnic(bp);
}

u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
99932d4f 1870 void *accel_priv, select_queue_fallback_t fallback)
8307fa3e 1871{
8307fa3e 1872 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1873
55c11941 1874 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1875 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1876 u16 ether_type = ntohs(hdr->h_proto);
1877
1878 /* Skip VLAN tag if present */
1879 if (ether_type == ETH_P_8021Q) {
1880 struct vlan_ethhdr *vhdr =
1881 (struct vlan_ethhdr *)skb->data;
1882
1883 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1884 }
1885
1886 /* If ethertype is FCoE or FIP - use FCoE ring */
1887 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1888 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1889 }
55c11941 1890
cdb9d6ae 1891 /* select a non-FCoE queue */
99932d4f 1892 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
8307fa3e
VZ
1893}
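/* Illustrative sketch, not driver code: how the ethertype test above looks
 * past a single 802.1Q tag before deciding whether to steer the frame to the
 * FCoE ring.  Assumes <linux/if_vlan.h> for struct vlan_ethhdr.
 */
static u16 example_skb_ethertype(const struct sk_buff *skb)
{
	const struct ethhdr *eth = (const struct ethhdr *)skb->data;
	u16 proto = ntohs(eth->h_proto);

	if (proto == ETH_P_8021Q) {
		const struct vlan_ethhdr *vhdr =
			(const struct vlan_ethhdr *)skb->data;

		proto = ntohs(vhdr->h_vlan_encapsulated_proto);
	}
	return proto;	/* ETH_P_FCOE / ETH_P_FIP select the FCoE ring */
}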
1894
d6214d7a
DK
1895void bnx2x_set_num_queues(struct bnx2x *bp)
1896{
96305234 1897 /* RSS queues */
55c11941 1898 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1899
a3348722
BW
1900 /* override in STORAGE SD modes */
1901 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
55c11941
MS
1902 bp->num_ethernet_queues = 1;
1903
ec6ba945 1904 /* Add special queues */
55c11941
MS
1905 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1906 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1907
1908 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1909}
1910
cdb9d6ae
VZ
1911/**
1912 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1913 *
1914 * @bp: Driver handle
1915 *
1916 * We currently support at most 16 Tx queues for each CoS, thus we will
1917 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1918 * bp->max_cos.
1919 *
1920 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1921 * index after all ETH L2 indices.
1922 *
1923 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1924 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
16a5fd92 1925 * 16..31,...) with indices that are not coupled with any real Tx queue.
cdb9d6ae
VZ
1926 *
1927 * The proper configuration of skb->queue_mapping is handled by
1928 * bnx2x_select_queue() and __skb_tx_hash().
1929 *
1930 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1931 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1932 */
55c11941 1933static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1934{
6383c0b3 1935 int rc, tx, rx;
ec6ba945 1936
65565884 1937 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1938 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1939
6383c0b3 1940/* account for fcoe queue */
55c11941
MS
1941 if (include_cnic && !NO_FCOE(bp)) {
1942 rx++;
1943 tx++;
6383c0b3 1944 }
6383c0b3
AE
1945
1946 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1947 if (rc) {
1948 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1949 return rc;
1950 }
1951 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1952 if (rc) {
1953 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1954 return rc;
1955 }
1956
51c1a580 1957 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1958 tx, rx);
1959
ec6ba945
VZ
1960 return rc;
1961}
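/* Illustrative sketch, not driver code: the Tx/Rx counts handed to the stack
 * above and the (CoS, ring) -> Tx queue index mapping they imply, matching
 * the txdata_ptr assignment in bnx2x_bz_fp().  num_eth and max_cos stand in
 * for BNX2X_NUM_ETH_QUEUES(bp) and bp->max_cos.
 */
static inline int example_real_num_tx(int num_eth, int max_cos, bool has_fcoe)
{
	return num_eth * max_cos + (has_fcoe ? 1 : 0);
}

static inline int example_txq_index(int num_eth, int cos, int ring)
{
	return cos * num_eth + ring;	/* the FCoE queue, if any, follows */
}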
1962
1191cb83 1963static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1964{
1965 int i;
1966
1967 for_each_queue(bp, i) {
1968 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1969 u32 mtu;
a8c94b91
VZ
1970
1971 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1972 if (IS_FCOE_IDX(i))
1973 /*
1974 * Although there are no IP frames expected to arrive to
1975 * this ring we still want to add an
1976 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1977 * overrun attack.
1978 */
e52fcb24 1979 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1980 else
e52fcb24
ED
1981 mtu = bp->dev->mtu;
1982 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1983 IP_HEADER_ALIGNMENT_PADDING +
1984 ETH_OVREHEAD +
1985 mtu +
1986 BNX2X_FW_RX_ALIGN_END;
16a5fd92 1987 /* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
d46d132c
ED
1988 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1989 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1990 else
1991 fp->rx_frag_size = 0;
a8c94b91
VZ
1992 }
1993}
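/* Illustrative sketch, not driver code: the sizing rule applied above.
 * "overhead" stands for the constants used by the real function
 * (BNX2X_FW_RX_ALIGN_START/END, IP_HEADER_ALIGNMENT_PADDING, ETH_OVREHEAD);
 * page-fragment allocation is only used while the buffer plus NET_SKB_PAD
 * still fits into a single page.
 */
static inline unsigned int example_rx_frag_size(unsigned int mtu,
						unsigned int overhead)
{
	unsigned int buf_size = overhead + mtu;

	if (buf_size + NET_SKB_PAD <= PAGE_SIZE)
		return buf_size + NET_SKB_PAD;
	return 0;	/* 0 means: fall back to regular kmalloc'ed skbs */
}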
1994
60cad4e6 1995static int bnx2x_init_rss(struct bnx2x *bp)
619c5cb6
VZ
1996{
1997 int i;
619c5cb6
VZ
1998 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1999
16a5fd92 2000 /* Prepare the initial contents for the indirection table if RSS is
619c5cb6
VZ
2001 * enabled
2002 */
5d317c6a
MS
2003 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2004 bp->rss_conf_obj.ind_table[i] =
96305234
DK
2005 bp->fp->cl_id +
2006 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
2007
2008 /*
2009 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2010 * per-port, so if explicit configuration is needed, do it only
2011 * for a PMF.
2012 *
2013 * For 57712 and newer on the other hand it's a per-function
2014 * configuration.
2015 */
5d317c6a 2016 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
2017}
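/* Illustrative sketch, not driver code: the default spread written above.
 * ethtool_rxfh_indir_default(i, n) is simply i % n, so every ETH Rx queue
 * gets an equal share of the indirection table before the cl_id offset is
 * added.  tbl_sz stands in for sizeof(bp->rss_conf_obj.ind_table).
 */
static void example_fill_ind_table(u8 *tbl, int tbl_sz, u8 base_cl_id,
				   u8 num_rx_queues)
{
	int i;

	for (i = 0; i < tbl_sz; i++)
		tbl[i] = base_cl_id +
			 ethtool_rxfh_indir_default(i, num_rx_queues);
}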
2018
60cad4e6
AE
2019int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2020 bool config_hash, bool enable)
619c5cb6 2021{
3b603066 2022 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
2023
2024 /* Although RSS is meaningless when there is a single HW queue we
2025 * still need it enabled in order to have HW Rx hash generated.
2026 *
2027 * if (!is_eth_multi(bp))
2028 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2029 */
2030
96305234 2031 params.rss_obj = rss_obj;
619c5cb6
VZ
2032
2033 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2034
60cad4e6
AE
2035 if (enable) {
2036 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2037
2038 /* RSS configuration */
2039 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2040 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2041 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2042 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2043 if (rss_obj->udp_rss_v4)
2044 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2045 if (rss_obj->udp_rss_v6)
2046 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2047 } else {
2048 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2049 }
619c5cb6 2050
96305234
DK
2051 /* Hash bits */
2052 params.rss_result_mask = MULTI_MASK;
619c5cb6 2053
5d317c6a 2054 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 2055
96305234
DK
2056 if (config_hash) {
2057 /* RSS keys */
60cad4e6 2058 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
96305234 2059 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
2060 }
2061
60cad4e6
AE
2062 if (IS_PF(bp))
2063 return bnx2x_config_rss(bp, &params);
2064 else
2065 return bnx2x_vfpf_config_rss(bp, &params);
619c5cb6
VZ
2066}
2067
1191cb83 2068static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 2069{
3b603066 2070 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
2071
2072 /* Prepare parameters for function state transitions */
2073 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2074
2075 func_params.f_obj = &bp->func_obj;
2076 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2077
2078 func_params.params.hw_init.load_phase = load_code;
2079
2080 return bnx2x_func_state_change(bp, &func_params);
2081}
2082
2083/*
2084 * Cleans the objects that have internal lists without sending
16a5fd92 2085 * ramrods. Should be run when interrupts are disabled.
619c5cb6 2086 */
7fa6f340 2087void bnx2x_squeeze_objects(struct bnx2x *bp)
619c5cb6
VZ
2088{
2089 int rc;
2090 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 2091 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 2092 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
2093
2094 /***************** Cleanup MACs' object first *************************/
2095
2096 /* Wait for completion of requested */
2097 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2098 /* Perform a dry cleanup */
2099 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2100
2101 /* Clean ETH primary MAC */
2102 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 2103 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
2104 &ramrod_flags);
2105 if (rc != 0)
2106 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2107
2108 /* Cleanup UC list */
2109 vlan_mac_flags = 0;
2110 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2111 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2112 &ramrod_flags);
2113 if (rc != 0)
2114 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2115
2116 /***************** Now clean mcast object *****************************/
2117 rparam.mcast_obj = &bp->mcast_obj;
2118 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2119
8b09be5f
YM
2120 /* Add a DEL command... - Since we're doing a driver cleanup only,
2121 * we take a lock surrounding both the initial send and the CONTs,
2122 * as we don't want a true completion to disrupt us in the middle.
2123 */
2124 netif_addr_lock_bh(bp->dev);
619c5cb6
VZ
2125 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2126 if (rc < 0)
51c1a580
MS
2127 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2128 rc);
619c5cb6
VZ
2129
2130 /* ...and wait until all pending commands are cleared */
2131 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2132 while (rc != 0) {
2133 if (rc < 0) {
2134 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2135 rc);
8b09be5f 2136 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2137 return;
2138 }
2139
2140 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2141 }
8b09be5f 2142 netif_addr_unlock_bh(bp->dev);
619c5cb6
VZ
2143}
2144
2145#ifndef BNX2X_STOP_ON_ERROR
2146#define LOAD_ERROR_EXIT(bp, label) \
2147 do { \
2148 (bp)->state = BNX2X_STATE_ERROR; \
2149 goto label; \
2150 } while (0)
55c11941
MS
2151
2152#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2153 do { \
2154 bp->cnic_loaded = false; \
2155 goto label; \
2156 } while (0)
2157#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
2158#define LOAD_ERROR_EXIT(bp, label) \
2159 do { \
2160 (bp)->state = BNX2X_STATE_ERROR; \
2161 (bp)->panic = 1; \
2162 return -EBUSY; \
2163 } while (0)
55c11941
MS
2164#define LOAD_ERROR_EXIT_CNIC(bp, label) \
2165 do { \
2166 bp->cnic_loaded = false; \
2167 (bp)->panic = 1; \
2168 return -EBUSY; \
2169 } while (0)
2170#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 2171
ad5afc89
AE
2172static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2173{
2174 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2175 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2176 return;
2177}
2178
2179static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2180{
8db573ba 2181 int num_groups, vf_headroom = 0;
ad5afc89 2182 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2183
ad5afc89
AE
2184 /* number of queues for statistics is number of eth queues + FCoE */
2185 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2186
ad5afc89
AE
2187 /* Total number of FW statistics requests =
2188 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2189 * and fcoe l2 queue) stats + num of queues (which includes another 1
2190 * for fcoe l2 queue if applicable)
2191 */
2192 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2193
8db573ba
AE
2194 /* vf stats appear in the request list, but their data is allocated by
2195 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2196 * it is used to determine where to place the vf stats queries in the
2197 * request struct
2198 */
2199 if (IS_SRIOV(bp))
6411280a 2200 vf_headroom = bnx2x_vf_headroom(bp);
8db573ba 2201
ad5afc89
AE
2202 /* Request is built from stats_query_header and an array of
2203 * stats_query_cmd_group each of which contains
2204 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2205 * configured in the stats_query_header.
2206 */
2207 num_groups =
8db573ba
AE
2208 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2209 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
ad5afc89
AE
2210 1 : 0));
2211
8db573ba
AE
2212 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2213 bp->fw_stats_num, vf_headroom, num_groups);
ad5afc89
AE
2214 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2215 num_groups * sizeof(struct stats_query_cmd_group);
2216
2217 /* Data for statistics requests + stats_counter
2218 * stats_counter holds per-STORM counters that are incremented
2219 * when STORM has finished with the current request.
2220 * memory for FCoE offloaded statistics is counted anyway,
2221 * even if they will not be sent.
2222 * VF stats are not accounted for here as the data of VF stats is stored
2223 * in memory allocated by the VF, not here.
2224 */
2225 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2226 sizeof(struct per_pf_stats) +
2227 sizeof(struct fcoe_statistics_params) +
2228 sizeof(struct per_queue_stats) * num_queue_stats +
2229 sizeof(struct stats_counter);
2230
2231 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2232 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2233
2234 /* Set shortcuts */
2235 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2236 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2237 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2238 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2239 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2240 bp->fw_stats_req_sz;
2241
6bf07b8e 2242 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
ad5afc89
AE
2243 U64_HI(bp->fw_stats_req_mapping),
2244 U64_LO(bp->fw_stats_req_mapping));
6bf07b8e 2245 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
ad5afc89
AE
2246 U64_HI(bp->fw_stats_data_mapping),
2247 U64_LO(bp->fw_stats_data_mapping));
2248 return 0;
2249
2250alloc_mem_err:
2251 bnx2x_free_fw_stats_mem(bp);
2252 BNX2X_ERR("Can't allocate FW stats memory\n");
2253 return -ENOMEM;
2254}
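/* Illustrative sketch, not driver code: the group-count arithmetic above is
 * an ordinary round-up division - each stats_query_cmd_group carries
 * STATS_QUERY_CMD_COUNT rules, so the total number of requests (driver
 * queries plus VF headroom) is rounded up to whole groups.
 */
static inline int example_stats_query_groups(int num_requests)
{
	return DIV_ROUND_UP(num_requests, STATS_QUERY_CMD_COUNT);
}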
2255
2256/* send load request to mcp and analyze response */
2257static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2258{
178135c1
DK
2259 u32 param;
2260
ad5afc89
AE
2261 /* init fw_seq */
2262 bp->fw_seq =
2263 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2264 DRV_MSG_SEQ_NUMBER_MASK);
2265 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2266
2267 /* Get current FW pulse sequence */
2268 bp->fw_drv_pulse_wr_seq =
2269 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2270 DRV_PULSE_SEQ_MASK);
2271 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2272
178135c1
DK
2273 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2274
2275 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2276 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2277
ad5afc89 2278 /* load request */
178135c1 2279 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
ad5afc89
AE
2280
2281 /* if mcp fails to respond we must abort */
2282 if (!(*load_code)) {
2283 BNX2X_ERR("MCP response failure, aborting\n");
2284 return -EBUSY;
2285 }
2286
2287 /* If mcp refused (e.g. other port is in diagnostic mode) we
2288 * must abort
2289 */
2290 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2291 BNX2X_ERR("MCP refused load request, aborting\n");
2292 return -EBUSY;
2293 }
2294 return 0;
2295}
2296
2297/* check whether another PF has already loaded FW to chip. In
2298 * virtualized environments a pf from another VM may have already
2299 * initialized the device including loading FW
2300 */
91ebb929 2301int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
ad5afc89
AE
2302{
2303 /* is another pf loaded on this engine? */
2304 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2305 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2306 /* build my FW version dword */
2307 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2308 (BCM_5710_FW_MINOR_VERSION << 8) +
2309 (BCM_5710_FW_REVISION_VERSION << 16) +
2310 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2311
2312 /* read loaded FW from chip */
2313 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2314
2315 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2316 loaded_fw, my_fw);
2317
2318 /* abort nic load if version mismatch */
2319 if (my_fw != loaded_fw) {
91ebb929
YM
2320 if (print_err)
2321 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
2322 loaded_fw, my_fw);
2323 else
2324 BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
2325 loaded_fw, my_fw);
ad5afc89
AE
2326 return -EBUSY;
2327 }
2328 }
2329 return 0;
2330}
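/* Illustrative sketch, not driver code: the version word compared above
 * packs the four FW version components into one u32, least significant
 * byte first, so a single equality test detects a mismatch with the FW
 * another PF has already loaded.
 */
static inline u32 example_pack_fw_ver(u8 major, u8 minor, u8 rev, u8 eng)
{
	return major | (minor << 8) | (rev << 16) | ((u32)eng << 24);
}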
2331
2332/* returns the "mcp load_code" according to global load_count array */
2333static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2334{
2335 int path = BP_PATH(bp);
2336
2337 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
a8f47eb7 2338 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2339 bnx2x_load_count[path][2]);
2340 bnx2x_load_count[path][0]++;
2341 bnx2x_load_count[path][1 + port]++;
ad5afc89 2342 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
a8f47eb7 2343 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2344 bnx2x_load_count[path][2]);
2345 if (bnx2x_load_count[path][0] == 1)
ad5afc89 2346 return FW_MSG_CODE_DRV_LOAD_COMMON;
a8f47eb7 2347 else if (bnx2x_load_count[path][1 + port] == 1)
ad5afc89
AE
2348 return FW_MSG_CODE_DRV_LOAD_PORT;
2349 else
2350 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2351}
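/* Illustrative sketch, not driver code: the no-MCP decision above.  counts[0]
 * tracks loads on the whole path and counts[1 + port] loads per port; the
 * first function on the path performs COMMON init, the first on a port
 * performs PORT init, everyone else only FUNCTION init.
 */
static int example_no_mcp_load_code(u32 *counts, int port)
{
	counts[0]++;
	counts[1 + port]++;

	if (counts[0] == 1)
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	if (counts[1 + port] == 1)
		return FW_MSG_CODE_DRV_LOAD_PORT;
	return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}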
2352
2353/* mark PMF if applicable */
2354static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2355{
2356 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2357 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2358 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2359 bp->port.pmf = 1;
2360 /* We need the barrier to ensure the ordering between the
2361 * writing to bp->port.pmf here and reading it from the
2362 * bnx2x_periodic_task().
2363 */
2364 smp_mb();
2365 } else {
2366 bp->port.pmf = 0;
452427b0
YM
2367 }
2368
ad5afc89
AE
2369 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2370}
2371
2372static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2373{
2374 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2375 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2376 (bp->common.shmem2_base)) {
2377 if (SHMEM2_HAS(bp, dcc_support))
2378 SHMEM2_WR(bp, dcc_support,
2379 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2380 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2381 if (SHMEM2_HAS(bp, afex_driver_support))
2382 SHMEM2_WR(bp, afex_driver_support,
2383 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2384 }
2385
2386 /* Set AFEX default VLAN tag to an invalid value */
2387 bp->afex_def_vlan_tag = -1;
452427b0
YM
2388}
2389
1191cb83
ED
2390/**
2391 * bnx2x_bz_fp - zero content of the fastpath structure.
2392 *
2393 * @bp: driver handle
2394 * @index: fastpath index to be zeroed
2395 *
2396 * Makes sure the contents of the bp->fp[index].napi is kept
2397 * intact.
2398 */
2399static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2400{
2401 struct bnx2x_fastpath *fp = &bp->fp[index];
65565884 2402 int cos;
1191cb83 2403 struct napi_struct orig_napi = fp->napi;
15192a8c 2404 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
d76a6111 2405
1191cb83 2406 /* bzero bnx2x_fastpath contents */
c3146eb6
DK
2407 if (fp->tpa_info)
2408 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2409 sizeof(struct bnx2x_agg_info));
2410 memset(fp, 0, sizeof(*fp));
1191cb83
ED
2411
2412 /* Restore the NAPI object as it has been already initialized */
2413 fp->napi = orig_napi;
15192a8c 2414 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2415 fp->bp = bp;
2416 fp->index = index;
2417 if (IS_ETH_FP(fp))
2418 fp->max_cos = bp->max_cos;
2419 else
2420 /* Special queues support only one CoS */
2421 fp->max_cos = 1;
2422
65565884 2423 /* Init txdata pointers */
65565884
MS
2424 if (IS_FCOE_FP(fp))
2425 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2426 if (IS_ETH_FP(fp))
2427 for_each_cos_in_tx_queue(fp, cos)
2428 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2429 BNX2X_NUM_ETH_QUEUES(bp) + index];
2430
16a5fd92 2431 /* set the tpa flag for each queue. The tpa flag determines the queue
1191cb83
ED
2432 * minimal size so it must be set prior to queue memory allocation
2433 */
2434 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2435 (bp->flags & GRO_ENABLE_FLAG &&
2436 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2437 if (bp->flags & TPA_ENABLE_FLAG)
2438 fp->mode = TPA_MODE_LRO;
2439 else if (bp->flags & GRO_ENABLE_FLAG)
2440 fp->mode = TPA_MODE_GRO;
2441
1191cb83
ED
2442 /* We don't want TPA on an FCoE L2 ring */
2443 if (IS_FCOE_FP(fp))
2444 fp->disable_tpa = 1;
55c11941
MS
2445}
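/* Illustrative sketch, not driver code: the save-zero-restore pattern used
 * above to wipe a fastpath while keeping its already-initialized NAPI (and
 * TPA bookkeeping) intact across a reload.  struct example_fp is a
 * hypothetical stand-in for struct bnx2x_fastpath.
 */
struct example_fp {
	struct napi_struct napi;	/* must survive the wipe */
	int index;			/* re-initialized on every load */
};

static void example_bz_keep_napi(struct example_fp *fp, int index)
{
	struct napi_struct saved = fp->napi;

	memset(fp, 0, sizeof(*fp));
	fp->napi = saved;
	fp->index = index;
}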
2446
2447int bnx2x_load_cnic(struct bnx2x *bp)
2448{
2449 int i, rc, port = BP_PORT(bp);
2450
2451 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2452
2453 mutex_init(&bp->cnic_mutex);
2454
ad5afc89
AE
2455 if (IS_PF(bp)) {
2456 rc = bnx2x_alloc_mem_cnic(bp);
2457 if (rc) {
2458 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2459 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2460 }
55c11941
MS
2461 }
2462
2463 rc = bnx2x_alloc_fp_mem_cnic(bp);
2464 if (rc) {
2465 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2466 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2467 }
2468
2469 /* Update the number of queues with the cnic queues */
2470 rc = bnx2x_set_real_num_queues(bp, 1);
2471 if (rc) {
2472 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2473 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2474 }
2475
2476 /* Add all CNIC NAPI objects */
2477 bnx2x_add_all_napi_cnic(bp);
2478 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2479 bnx2x_napi_enable_cnic(bp);
2480
2481 rc = bnx2x_init_hw_func_cnic(bp);
2482 if (rc)
2483 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2484
2485 bnx2x_nic_init_cnic(bp);
2486
ad5afc89
AE
2487 if (IS_PF(bp)) {
2488 /* Enable Timer scan */
2489 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2490
2491 /* setup cnic queues */
2492 for_each_cnic_queue(bp, i) {
2493 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2494 if (rc) {
2495 BNX2X_ERR("Queue setup failed\n");
2496 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2497 }
55c11941
MS
2498 }
2499 }
2500
2501 /* Initialize Rx filter. */
8b09be5f 2502 bnx2x_set_rx_mode_inner(bp);
55c11941
MS
2503
2504 /* re-read iscsi info */
2505 bnx2x_get_iscsi_info(bp);
2506 bnx2x_setup_cnic_irq_info(bp);
2507 bnx2x_setup_cnic_info(bp);
2508 bp->cnic_loaded = true;
2509 if (bp->state == BNX2X_STATE_OPEN)
2510 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2511
55c11941
MS
2512 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2513
2514 return 0;
2515
2516#ifndef BNX2X_STOP_ON_ERROR
2517load_error_cnic2:
2518 /* Disable Timer scan */
2519 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2520
2521load_error_cnic1:
2522 bnx2x_napi_disable_cnic(bp);
2523 /* Update the number of queues without the cnic queues */
d9d81862 2524 if (bnx2x_set_real_num_queues(bp, 0))
55c11941
MS
2525 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2526load_error_cnic0:
2527 BNX2X_ERR("CNIC-related load failed\n");
2528 bnx2x_free_fp_mem_cnic(bp);
2529 bnx2x_free_mem_cnic(bp);
2530 return rc;
2531#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2532}
2533
9f6c9258
DK
2534/* must be called with rtnl_lock */
2535int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2536{
619c5cb6 2537 int port = BP_PORT(bp);
ad5afc89 2538 int i, rc = 0, load_code = 0;
9f6c9258 2539
55c11941
MS
2540 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2541 DP(NETIF_MSG_IFUP,
2542 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2543
9f6c9258 2544#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2545 if (unlikely(bp->panic)) {
2546 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2547 return -EPERM;
51c1a580 2548 }
9f6c9258
DK
2549#endif
2550
2551 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2552
16a5fd92 2553 /* zero the structure w/o any lock, before SP handler is initialized */
2ae17f66
VZ
2554 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2555 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2556 &bp->last_reported_link.link_report_flags);
2ae17f66 2557
ad5afc89
AE
2558 if (IS_PF(bp))
2559 /* must be called before memory allocation and HW init */
2560 bnx2x_ilt_set_info(bp);
523224a3 2561
6383c0b3
AE
2562 /*
2563 * Zero fastpath structures preserving invariants like napi, which are
2564 * allocated only once, fp index, max_cos, bp pointer.
65565884 2565 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2566 */
51c1a580 2567 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2568 for_each_queue(bp, i)
2569 bnx2x_bz_fp(bp, i);
55c11941
MS
2570 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2571 bp->num_cnic_queues) *
2572 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2573
55c11941 2574 bp->fcoe_init = false;
6383c0b3 2575
a8c94b91
VZ
2576 /* Set the receive queues buffer size */
2577 bnx2x_set_rx_buf_size(bp);
2578
ad5afc89
AE
2579 if (IS_PF(bp)) {
2580 rc = bnx2x_alloc_mem(bp);
2581 if (rc) {
2582 BNX2X_ERR("Unable to allocate bp memory\n");
2583 return rc;
2584 }
2585 }
2586
ad5afc89
AE
2587 /* needs to be done after alloc mem, since it's self-adjusting to the amount
2588 * of memory available for RSS queues
2589 */
2590 rc = bnx2x_alloc_fp_mem(bp);
2591 if (rc) {
2592 BNX2X_ERR("Unable to allocate memory for fps\n");
2593 LOAD_ERROR_EXIT(bp, load_error0);
2594 }
d6214d7a 2595
e3ed4eae
DK
2596 /* Allocate memory for FW statistics */
2597 if (bnx2x_alloc_fw_stats_mem(bp))
2598 LOAD_ERROR_EXIT(bp, load_error0);
2599
8d9ac297
AE
2600 /* request pf to initialize status blocks */
2601 if (IS_VF(bp)) {
2602 rc = bnx2x_vfpf_init(bp);
2603 if (rc)
2604 LOAD_ERROR_EXIT(bp, load_error0);
2605 }
2606
b3b83c3f
DK
2607 /* As long as bnx2x_alloc_mem() may possibly update
2608 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2609 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2610 */
55c11941 2611 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2612 if (rc) {
ec6ba945 2613 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2614 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2615 }
2616
6383c0b3 2617 /* configure multi cos mappings in kernel.
16a5fd92
YM
2618 * this configuration may be overridden by a multi class queue
2619 * discipline or by a dcbx negotiation result.
6383c0b3
AE
2620 */
2621 bnx2x_setup_tc(bp->dev, bp->max_cos);
2622
26614ba5
MS
2623 /* Add all NAPI objects */
2624 bnx2x_add_all_napi(bp);
55c11941 2625 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2626 bnx2x_napi_enable(bp);
2627
ad5afc89
AE
2628 if (IS_PF(bp)) {
2629 /* set pf load just before approaching the MCP */
2630 bnx2x_set_pf_load(bp);
2631
2632 /* if mcp exists send load request and analyze response */
2633 if (!BP_NOMCP(bp)) {
2634 /* attempt to load pf */
2635 rc = bnx2x_nic_load_request(bp, &load_code);
2636 if (rc)
2637 LOAD_ERROR_EXIT(bp, load_error1);
2638
2639 /* what did mcp say? */
91ebb929 2640 rc = bnx2x_compare_fw_ver(bp, load_code, true);
ad5afc89
AE
2641 if (rc) {
2642 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2643 LOAD_ERROR_EXIT(bp, load_error2);
2644 }
ad5afc89
AE
2645 } else {
2646 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2647 }
9f6c9258 2648
ad5afc89
AE
2649 /* mark pmf if applicable */
2650 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2651
ad5afc89
AE
2652 /* Init Function state controlling object */
2653 bnx2x__init_func_obj(bp);
6383c0b3 2654
ad5afc89
AE
2655 /* Initialize HW */
2656 rc = bnx2x_init_hw(bp, load_code);
2657 if (rc) {
2658 BNX2X_ERR("HW init failed, aborting\n");
2659 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2660 LOAD_ERROR_EXIT(bp, load_error2);
2661 }
9f6c9258
DK
2662 }
2663
ecf01c22
YM
2664 bnx2x_pre_irq_nic_init(bp);
2665
d6214d7a
DK
2666 /* Connect to IRQs */
2667 rc = bnx2x_setup_irqs(bp);
523224a3 2668 if (rc) {
ad5afc89
AE
2669 BNX2X_ERR("setup irqs failed\n");
2670 if (IS_PF(bp))
2671 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2672 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2673 }
2674
619c5cb6 2675 /* Init per-function objects */
ad5afc89 2676 if (IS_PF(bp)) {
ecf01c22
YM
2677 /* Setup NIC internals and enable interrupts */
2678 bnx2x_post_irq_nic_init(bp, load_code);
2679
ad5afc89 2680 bnx2x_init_bp_objs(bp);
b56e9670 2681 bnx2x_iov_nic_init(bp);
a3348722 2682
ad5afc89
AE
2683 /* Set AFEX default VLAN tag to an invalid value */
2684 bp->afex_def_vlan_tag = -1;
2685 bnx2x_nic_load_afex_dcc(bp, load_code);
2686 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2687 rc = bnx2x_func_start(bp);
2688 if (rc) {
2689 BNX2X_ERR("Function start failed!\n");
2690 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2691
619c5cb6 2692 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2693 }
9f6c9258 2694
ad5afc89
AE
2695 /* Send LOAD_DONE command to MCP */
2696 if (!BP_NOMCP(bp)) {
2697 load_code = bnx2x_fw_command(bp,
2698 DRV_MSG_CODE_LOAD_DONE, 0);
2699 if (!load_code) {
2700 BNX2X_ERR("MCP response failure, aborting\n");
2701 rc = -EBUSY;
2702 LOAD_ERROR_EXIT(bp, load_error3);
2703 }
2704 }
9f6c9258 2705
0c14e5ce
AE
2706 /* initialize FW coalescing state machines in RAM */
2707 bnx2x_update_coalesce(bp);
60cad4e6 2708 }
0c14e5ce 2709
60cad4e6
AE
2710 /* setup the leading queue */
2711 rc = bnx2x_setup_leading(bp);
2712 if (rc) {
2713 BNX2X_ERR("Setup leading failed!\n");
2714 LOAD_ERROR_EXIT(bp, load_error3);
2715 }
ad5afc89 2716
60cad4e6
AE
2717 /* set up the rest of the queues */
2718 for_each_nondefault_eth_queue(bp, i) {
2719 if (IS_PF(bp))
2720 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2721 else /* VF */
2722 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
ad5afc89 2723 if (rc) {
60cad4e6 2724 BNX2X_ERR("Queue %d setup failed\n", i);
ad5afc89
AE
2725 LOAD_ERROR_EXIT(bp, load_error3);
2726 }
60cad4e6 2727 }
8d9ac297 2728
60cad4e6
AE
2729 /* setup rss */
2730 rc = bnx2x_init_rss(bp);
2731 if (rc) {
2732 BNX2X_ERR("PF RSS init failed\n");
2733 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2734 }
619c5cb6 2735
523224a3
DK
2736 /* Now when Clients are configured we are ready to work */
2737 bp->state = BNX2X_STATE_OPEN;
2738
619c5cb6 2739 /* Configure a ucast MAC */
ad5afc89
AE
2740 if (IS_PF(bp))
2741 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297 2742 else /* vf */
f8f4f61a
DK
2743 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2744 true);
51c1a580
MS
2745 if (rc) {
2746 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2747 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2748 }
6e30dd4e 2749
ad5afc89 2750 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2751 bnx2x_update_max_mf_config(bp, bp->pending_max);
2752 bp->pending_max = 0;
2753 }
2754
ad5afc89
AE
2755 if (bp->port.pmf) {
2756 rc = bnx2x_initial_phy_init(bp, load_mode);
2757 if (rc)
2758 LOAD_ERROR_EXIT(bp, load_error3);
2759 }
c63da990 2760 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2761
619c5cb6
VZ
2762 /* Start fast path */
2763
2764 /* Initialize Rx filter. */
8b09be5f 2765 bnx2x_set_rx_mode_inner(bp);
6e30dd4e 2766
619c5cb6 2767 /* Start the Tx */
9f6c9258
DK
2768 switch (load_mode) {
2769 case LOAD_NORMAL:
16a5fd92 2770 /* Tx queues should only be re-enabled, not restarted */
523224a3 2771 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2772 break;
2773
2774 case LOAD_OPEN:
2775 netif_tx_start_all_queues(bp->dev);
523224a3 2776 smp_mb__after_clear_bit();
9f6c9258
DK
2777 break;
2778
2779 case LOAD_DIAG:
8970b2e4 2780 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2781 bp->state = BNX2X_STATE_DIAG;
2782 break;
2783
2784 default:
2785 break;
2786 }
2787
00253a8c 2788 if (bp->port.pmf)
4c704899 2789 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2790 else
9f6c9258
DK
2791 bnx2x__link_status_update(bp);
2792
2793 /* start the timer */
2794 mod_timer(&bp->timer, jiffies + bp->current_interval);
2795
55c11941
MS
2796 if (CNIC_ENABLED(bp))
2797 bnx2x_load_cnic(bp);
9f6c9258 2798
ad5afc89
AE
2799 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2800 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2801 u32 val;
2802 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2803 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2804 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2805 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2806 }
2807
619c5cb6 2808 /* Wait for all pending SP commands to complete */
ad5afc89 2809 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2810 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2811 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2812 return -EBUSY;
2813 }
6891dd25 2814
9876879f
BW
2815 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2816 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2817 bnx2x_dcbx_init(bp, false);
2818
55c11941
MS
2819 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2820
9f6c9258
DK
2821 return 0;
2822
619c5cb6 2823#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2824load_error3:
ad5afc89
AE
2825 if (IS_PF(bp)) {
2826 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2827
ad5afc89
AE
2828 /* Clean queueable objects */
2829 bnx2x_squeeze_objects(bp);
2830 }
619c5cb6 2831
9f6c9258
DK
2832 /* Free SKBs, SGEs, TPA pool and driver internals */
2833 bnx2x_free_skbs(bp);
ec6ba945 2834 for_each_rx_queue(bp, i)
9f6c9258 2835 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2836
9f6c9258 2837 /* Release IRQs */
d6214d7a
DK
2838 bnx2x_free_irq(bp);
2839load_error2:
ad5afc89 2840 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2841 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2842 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2843 }
2844
2845 bp->port.pmf = 0;
9f6c9258
DK
2846load_error1:
2847 bnx2x_napi_disable(bp);
722c6f58 2848 bnx2x_del_all_napi(bp);
ad5afc89 2849
889b9af3 2850 /* clear pf_load status, as it was already set */
ad5afc89
AE
2851 if (IS_PF(bp))
2852 bnx2x_clear_pf_load(bp);
d6214d7a 2853load_error0:
ad5afc89 2854 bnx2x_free_fw_stats_mem(bp);
e3ed4eae 2855 bnx2x_free_fp_mem(bp);
9f6c9258
DK
2856 bnx2x_free_mem(bp);
2857
2858 return rc;
619c5cb6 2859#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2860}
2861
7fa6f340 2862int bnx2x_drain_tx_queues(struct bnx2x *bp)
ad5afc89
AE
2863{
2864 u8 rc = 0, cos, i;
2865
2866 /* Wait until tx fastpath tasks complete */
2867 for_each_tx_queue(bp, i) {
2868 struct bnx2x_fastpath *fp = &bp->fp[i];
2869
2870 for_each_cos_in_tx_queue(fp, cos)
2871 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2872 if (rc)
2873 return rc;
2874 }
2875 return 0;
2876}
2877
9f6c9258 2878/* must be called with rtnl_lock */
5d07d868 2879int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2880{
2881 int i;
c9ee9206
VZ
2882 bool global = false;
2883
55c11941
MS
2884 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2885
9ce392d4 2886 /* mark driver is unloaded in shmem2 */
ad5afc89 2887 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2888 u32 val;
2889 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2890 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2891 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2892 }
2893
80bfe5cc 2894 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
ad5afc89
AE
2895 (bp->state == BNX2X_STATE_CLOSED ||
2896 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2897 /* We can get here if the driver has been unloaded
2898 * during parity error recovery and is either waiting for a
2899 * leader to complete or for other functions to unload and
2900 * then ifdown has been issued. In this case we want to
2901 * unload and let other functions to complete a recovery
2902 * process.
2903 */
9f6c9258
DK
2904 bp->recovery_state = BNX2X_RECOVERY_DONE;
2905 bp->is_leader = 0;
c9ee9206
VZ
2906 bnx2x_release_leader_lock(bp);
2907 smp_mb();
2908
51c1a580
MS
2909 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2910 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2911 return -EINVAL;
2912 }
2913
80bfe5cc 2914 /* Nothing to do during unload if previous bnx2x_nic_load()
16a5fd92 2915 * has not completed successfully - all resources are released.
80bfe5cc
YM
2916 *
2917 * we can get here only after unsuccessful ndo_* callback, during which
2918 * dev->IFF_UP flag is still on.
2919 */
2920 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
2921 return 0;
2922
2923 /* It's important to set the bp->state to the value different from
87b7ba3d
VZ
2924 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2925 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2926 */
2927 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2928 smp_mb();
2929
78c3bcc5
AE
2930 /* indicate to VFs that the PF is going down */
2931 bnx2x_iov_channel_down(bp);
2932
55c11941
MS
2933 if (CNIC_LOADED(bp))
2934 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2935
9505ee37
VZ
2936 /* Stop Tx */
2937 bnx2x_tx_disable(bp);
65565884 2938 netdev_reset_tc(bp->dev);
9505ee37 2939
9f6c9258 2940 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2941
9f6c9258 2942 del_timer_sync(&bp->timer);
f85582f8 2943
ad5afc89
AE
2944 if (IS_PF(bp)) {
2945 /* Set ALWAYS_ALIVE bit in shmem */
2946 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2947 bnx2x_drv_pulse(bp);
2948 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2949 bnx2x_save_statistics(bp);
2950 }
9f6c9258 2951
ad5afc89
AE
2952 /* wait till consumers catch up with producers in all queues */
2953 bnx2x_drain_tx_queues(bp);
9f6c9258 2954
9b176b6b
AE
2955 /* if VF indicate to PF this function is going down (PF will delete sp
2956 * elements and clear initializations
2957 */
2958 if (IS_VF(bp))
2959 bnx2x_vfpf_close_vf(bp);
2960 else if (unload_mode != UNLOAD_RECOVERY)
2961 /* if this is a normal/close unload need to clean up chip*/
5d07d868 2962 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2963 else {
c9ee9206
VZ
2964 /* Send the UNLOAD_REQUEST to the MCP */
2965 bnx2x_send_unload_req(bp, unload_mode);
2966
16a5fd92 2967 /* Prevent transactions to host from the functions on the
c9ee9206 2968 * engine that doesn't reset global blocks in case of global
16a5fd92 2969 * attention once global blocks are reset and gates are opened
c9ee9206
VZ
2970 * (the engine which leader will perform the recovery
2971 * last).
2972 */
2973 if (!CHIP_IS_E1x(bp))
2974 bnx2x_pf_disable(bp);
2975
2976 /* Disable HW interrupts, NAPI */
523224a3 2977 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2978 /* Delete all NAPI objects */
2979 bnx2x_del_all_napi(bp);
55c11941
MS
2980 if (CNIC_LOADED(bp))
2981 bnx2x_del_all_napi_cnic(bp);
523224a3 2982 /* Release IRQs */
d6214d7a 2983 bnx2x_free_irq(bp);
c9ee9206
VZ
2984
2985 /* Report UNLOAD_DONE to MCP */
5d07d868 2986 bnx2x_send_unload_done(bp, false);
523224a3 2987 }
9f6c9258 2988
619c5cb6 2989 /*
16a5fd92 2990 * At this stage no more interrupts will arrive so we may safely clean
619c5cb6
VZ
2991 * the queueable objects here in case they failed to get cleaned so far.
2992 */
ad5afc89
AE
2993 if (IS_PF(bp))
2994 bnx2x_squeeze_objects(bp);
619c5cb6 2995
79616895
VZ
2996 /* There should be no more pending SP commands at this stage */
2997 bp->sp_state = 0;
2998
9f6c9258
DK
2999 bp->port.pmf = 0;
3000
a0d307b2
DK
3001 /* clear pending work in rtnl task */
3002 bp->sp_rtnl_state = 0;
3003 smp_mb();
3004
9f6c9258
DK
3005 /* Free SKBs, SGEs, TPA pool and driver internals */
3006 bnx2x_free_skbs(bp);
55c11941
MS
3007 if (CNIC_LOADED(bp))
3008 bnx2x_free_skbs_cnic(bp);
ec6ba945 3009 for_each_rx_queue(bp, i)
9f6c9258 3010 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 3011
ad5afc89
AE
3012 bnx2x_free_fp_mem(bp);
3013 if (CNIC_LOADED(bp))
55c11941 3014 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 3015
ad5afc89 3016 if (IS_PF(bp)) {
ad5afc89
AE
3017 if (CNIC_LOADED(bp))
3018 bnx2x_free_mem_cnic(bp);
3019 }
b4cddbd6
AE
3020 bnx2x_free_mem(bp);
3021
9f6c9258 3022 bp->state = BNX2X_STATE_CLOSED;
55c11941 3023 bp->cnic_loaded = false;
9f6c9258 3024
c9ee9206
VZ
3025 /* Check if there are pending parity attentions. If there are - set
3026 * RECOVERY_IN_PROGRESS.
3027 */
ad5afc89 3028 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
3029 bnx2x_set_reset_in_progress(bp);
3030
3031 /* Set RESET_IS_GLOBAL if needed */
3032 if (global)
3033 bnx2x_set_reset_global(bp);
3034 }
3035
9f6c9258
DK
3036 /* The last driver must disable a "close the gate" if there is no
3037 * parity attention or "process kill" pending.
3038 */
ad5afc89
AE
3039 if (IS_PF(bp) &&
3040 !bnx2x_clear_pf_load(bp) &&
3041 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
3042 bnx2x_disable_close_the_gate(bp);
3043
55c11941
MS
3044 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3045
9f6c9258
DK
3046 return 0;
3047}
f85582f8 3048
9f6c9258
DK
3049int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3050{
3051 u16 pmcsr;
3052
adf5f6a1 3053 /* If there is no power capability, silently succeed */
29ed74c3 3054 if (!bp->pdev->pm_cap) {
51c1a580 3055 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
3056 return 0;
3057 }
3058
29ed74c3 3059 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
9f6c9258
DK
3060
3061 switch (state) {
3062 case PCI_D0:
29ed74c3 3063 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3064 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3065 PCI_PM_CTRL_PME_STATUS));
3066
3067 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3068 /* delay required during transition out of D3hot */
3069 msleep(20);
3070 break;
3071
3072 case PCI_D3hot:
3073 /* If there are other clients above don't
3074 shut down the power */
3075 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3076 return 0;
3077 /* Don't shut down the power for emulation and FPGA */
3078 if (CHIP_REV_IS_SLOW(bp))
3079 return 0;
3080
3081 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3082 pmcsr |= 3;
3083
3084 if (bp->wol)
3085 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3086
29ed74c3 3087 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
9f6c9258
DK
3088 pmcsr);
3089
3090 /* No more memory access after this point until
3091 * device is brought back to D0.
3092 */
3093 break;
3094
3095 default:
51c1a580 3096 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
3097 return -EINVAL;
3098 }
3099 return 0;
3100}
3101
9f6c9258
DK
3102/*
3103 * net_device service functions
3104 */
a8f47eb7 3105static int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
3106{
3107 int work_done = 0;
6383c0b3 3108 u8 cos;
9f6c9258
DK
3109 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3110 napi);
3111 struct bnx2x *bp = fp->bp;
3112
3113 while (1) {
3114#ifdef BNX2X_STOP_ON_ERROR
3115 if (unlikely(bp->panic)) {
3116 napi_complete(napi);
3117 return 0;
3118 }
3119#endif
8f20aa57
DK
3120 if (!bnx2x_fp_lock_napi(fp))
3121 return work_done;
9f6c9258 3122
6383c0b3 3123 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
3124 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3125 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 3126
9f6c9258
DK
3127 if (bnx2x_has_rx_work(fp)) {
3128 work_done += bnx2x_rx_int(fp, budget - work_done);
3129
3130 /* must not complete if we consumed full budget */
8f20aa57
DK
3131 if (work_done >= budget) {
3132 bnx2x_fp_unlock_napi(fp);
9f6c9258 3133 break;
8f20aa57 3134 }
9f6c9258
DK
3135 }
3136
3137 /* Fall out from the NAPI loop if needed */
8f20aa57
DK
3138 if (!bnx2x_fp_unlock_napi(fp) &&
3139 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 3140
ec6ba945
VZ
3141 /* No need to update SB for FCoE L2 ring as long as
3142 * it's connected to the default SB and the SB
3143 * has been updated when NAPI was scheduled.
3144 */
3145 if (IS_FCOE_FP(fp)) {
3146 napi_complete(napi);
3147 break;
3148 }
9f6c9258 3149 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
3150 /* bnx2x_has_rx_work() reads the status block,
3151 * thus we need to ensure that status block indices
3152 * have been actually read (bnx2x_update_fpsb_idx)
3153 * prior to this check (bnx2x_has_rx_work) so that
3154 * we won't write the "newer" value of the status block
3155 * to IGU (if there was a DMA right after
3156 * bnx2x_has_rx_work and if there is no rmb, the memory
3157 * reading (bnx2x_update_fpsb_idx) may be postponed
3158 * to right before bnx2x_ack_sb). In this case there
3159 * will never be another interrupt until there is
3160 * another update of the status block, while there
3161 * is still unhandled work.
3162 */
9f6c9258
DK
3163 rmb();
3164
3165 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3166 napi_complete(napi);
3167 /* Re-enable interrupts */
51c1a580 3168 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3169 "Update index to %d\n", fp->fp_hc_idx);
3170 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3171 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3172 IGU_INT_ENABLE, 1);
3173 break;
3174 }
3175 }
3176 }
3177
3178 return work_done;
3179}
3180
e0d1095a 3181#ifdef CONFIG_NET_RX_BUSY_POLL
8f20aa57
DK
3182/* must be called with local_bh_disable()d */
3183int bnx2x_low_latency_recv(struct napi_struct *napi)
3184{
3185 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3186 napi);
3187 struct bnx2x *bp = fp->bp;
3188 int found = 0;
3189
3190 if ((bp->state == BNX2X_STATE_CLOSED) ||
3191 (bp->state == BNX2X_STATE_ERROR) ||
3192 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3193 return LL_FLUSH_FAILED;
3194
3195 if (!bnx2x_fp_lock_poll(fp))
3196 return LL_FLUSH_BUSY;
3197
75b29459 3198 if (bnx2x_has_rx_work(fp))
8f20aa57 3199 found = bnx2x_rx_int(fp, 4);
8f20aa57
DK
3200
3201 bnx2x_fp_unlock_poll(fp);
3202
3203 return found;
3204}
3205#endif
3206
9f6c9258
DK
3207/* we split the first BD into headers and data BDs
3208 * to ease the pain of our fellow microcode engineers
3209 * we use one mapping for both BDs
9f6c9258 3210 */
91226790
DK
3211static u16 bnx2x_tx_split(struct bnx2x *bp,
3212 struct bnx2x_fp_txdata *txdata,
3213 struct sw_tx_bd *tx_buf,
3214 struct eth_tx_start_bd **tx_bd, u16 hlen,
3215 u16 bd_prod)
9f6c9258
DK
3216{
3217 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3218 struct eth_tx_bd *d_tx_bd;
3219 dma_addr_t mapping;
3220 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3221
3222 /* first fix first BD */
9f6c9258
DK
3223 h_tx_bd->nbytes = cpu_to_le16(hlen);
3224
91226790
DK
3225 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3226 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
9f6c9258
DK
3227
3228 /* now get a new data BD
3229 * (after the pbd) and fill it */
3230 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3231 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3232
3233 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3234 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3235
3236 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3237 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3238 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3239
3240 /* this marks the BD as one that has no individual mapping */
3241 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3242
3243 DP(NETIF_MSG_TX_QUEUED,
3244 "TSO split data size is %d (%x:%x)\n",
3245 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3246
3247 /* update tx_bd */
3248 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3249
3250 return bd_prod;
3251}
3252
86564c3f
YM
3253#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3254#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
91226790 3255static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9f6c9258 3256{
86564c3f
YM
3257 __sum16 tsum = (__force __sum16) csum;
3258
9f6c9258 3259 if (fix > 0)
86564c3f
YM
3260 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3261 csum_partial(t_header - fix, fix, 0)));
9f6c9258
DK
3262
3263 else if (fix < 0)
86564c3f
YM
3264 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3265 csum_partial(t_header, -fix, 0)));
9f6c9258 3266
e2593fcd 3267 return bswab16(tsum);
9f6c9258
DK
3268}
3269
91226790 3270static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9f6c9258
DK
3271{
3272 u32 rc;
a848ade4
DK
3273 __u8 prot = 0;
3274 __be16 protocol;
9f6c9258
DK
3275
3276 if (skb->ip_summed != CHECKSUM_PARTIAL)
a848ade4 3277 return XMIT_PLAIN;
9f6c9258 3278
a848ade4
DK
3279 protocol = vlan_get_protocol(skb);
3280 if (protocol == htons(ETH_P_IPV6)) {
3281 rc = XMIT_CSUM_V6;
3282 prot = ipv6_hdr(skb)->nexthdr;
3283 } else {
3284 rc = XMIT_CSUM_V4;
3285 prot = ip_hdr(skb)->protocol;
3286 }
9f6c9258 3287
a848ade4
DK
3288 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3289 if (inner_ip_hdr(skb)->version == 6) {
3290 rc |= XMIT_CSUM_ENC_V6;
3291 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3292 rc |= XMIT_CSUM_TCP;
9f6c9258 3293 } else {
a848ade4
DK
3294 rc |= XMIT_CSUM_ENC_V4;
3295 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
9f6c9258
DK
3296 rc |= XMIT_CSUM_TCP;
3297 }
3298 }
a848ade4
DK
3299 if (prot == IPPROTO_TCP)
3300 rc |= XMIT_CSUM_TCP;
9f6c9258 3301
36a8f39e
ED
3302 if (skb_is_gso(skb)) {
3303 if (skb_is_gso_v6(skb)) {
3304 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3305 if (rc & XMIT_CSUM_ENC)
3306 rc |= XMIT_GSO_ENC_V6;
3307 } else {
3308 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3309 if (rc & XMIT_CSUM_ENC)
3310 rc |= XMIT_GSO_ENC_V4;
3311 }
a848ade4 3312 }
9f6c9258
DK
3313
3314 return rc;
3315}
3316
3317#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3318/* check if packet requires linearization (packet is too fragmented)
3319 no need to check fragmentation if page size > 8K (there will be no
3320 violation to FW restrictions) */
3321static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3322 u32 xmit_type)
3323{
3324 int to_copy = 0;
3325 int hlen = 0;
3326 int first_bd_sz = 0;
3327
3328 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3329 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3330
3331 if (xmit_type & XMIT_GSO) {
3332 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3333 /* Check if LSO packet needs to be copied:
3334 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3335 int wnd_size = MAX_FETCH_BD - 3;
3336 /* Number of windows to check */
3337 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3338 int wnd_idx = 0;
3339 int frag_idx = 0;
3340 u32 wnd_sum = 0;
3341
3342 /* Headers length */
3343 hlen = (int)(skb_transport_header(skb) - skb->data) +
3344 tcp_hdrlen(skb);
3345
3346 /* Amount of data (w/o headers) on linear part of SKB*/
3347 first_bd_sz = skb_headlen(skb) - hlen;
3348
3349 wnd_sum = first_bd_sz;
3350
3351 /* Calculate the first sum - it's special */
3352 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3353 wnd_sum +=
9e903e08 3354 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3355
3356 /* If there was data in the linear part of the skb - check it */
3357 if (first_bd_sz > 0) {
3358 if (unlikely(wnd_sum < lso_mss)) {
3359 to_copy = 1;
3360 goto exit_lbl;
3361 }
3362
3363 wnd_sum -= first_bd_sz;
3364 }
3365
3366 /* Others are easier: run through the frag list and
3367 check all windows */
3368 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3369 wnd_sum +=
9e903e08 3370 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3371
3372 if (unlikely(wnd_sum < lso_mss)) {
3373 to_copy = 1;
3374 break;
3375 }
3376 wnd_sum -=
9e903e08 3377 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3378 }
3379 } else {
3380 /* a non-LSO packet that is too fragmented should always
3381 be linearized */
3382 to_copy = 1;
3383 }
3384 }
3385
3386exit_lbl:
3387 if (unlikely(to_copy))
3388 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3389 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3390 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3391 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3392
3393 return to_copy;
3394}
3395#endif
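/* Illustrative sketch, not driver code: the sliding-window test performed by
 * bnx2x_pkt_req_lin() above.  frag_len[0] is the size of the linear data and
 * the remaining entries are the fragment sizes; the packet must be linearized
 * if any window of wnd_size consecutive entries sums to less than the LSO
 * MSS, since FW fetches at most that many BDs per transmitted MSS.
 */
static bool example_needs_linearization(const unsigned int *frag_len,
					int nr_entries, int wnd_size,
					unsigned int lso_mss)
{
	unsigned int wnd_sum = 0;
	int i;

	for (i = 0; i < nr_entries; i++) {
		wnd_sum += frag_len[i];
		if (i >= wnd_size)
			wnd_sum -= frag_len[i - wnd_size];
		if (i >= wnd_size - 1 && wnd_sum < lso_mss)
			return true;
	}
	return false;
}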
3396
91226790
DK
3397static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3398 u32 xmit_type)
f2e0899f 3399{
a848ade4
DK
3400 struct ipv6hdr *ipv6;
3401
2297a2da
VZ
3402 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3403 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3404 ETH_TX_PARSE_BD_E2_LSO_MSS;
a848ade4
DK
3405
3406 if (xmit_type & XMIT_GSO_ENC_V6)
3407 ipv6 = inner_ipv6_hdr(skb);
3408 else if (xmit_type & XMIT_GSO_V6)
3409 ipv6 = ipv6_hdr(skb);
3410 else
3411 ipv6 = NULL;
3412
3413 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
2297a2da 3414 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3415}
3416
3417/**
e8920674 3418 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3419 *
e8920674
DK
3420 * @skb: packet skb
3421 * @pbd: parse BD
3422 * @xmit_type: xmit flags
f2e0899f 3423 */
91226790
DK
3424static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3425 struct eth_tx_parse_bd_e1x *pbd,
057cf65e 3426 struct eth_tx_start_bd *tx_start_bd,
91226790 3427 u32 xmit_type)
f2e0899f
DK
3428{
3429 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
86564c3f 3430 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
91226790 3431 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
f2e0899f
DK
3432
3433 if (xmit_type & XMIT_GSO_V4) {
3434 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3435 pbd->tcp_pseudo_csum =
3436 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3437 ip_hdr(skb)->daddr,
3438 0, IPPROTO_TCP, 0));
3439 
3440 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3441 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3442 } else {
3443 pbd->tcp_pseudo_csum =
3444 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3445 &ipv6_hdr(skb)->daddr,
3446 0, IPPROTO_TCP, 0));
3447 }
3448 
3449 pbd->global_data |=
3450 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3451}
3452 
3453/**
3454 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3455 *
3456 * @bp: driver handle
3457 * @skb: packet skb
3458 * @parsing_data: data to be updated
3459 * @xmit_type: xmit flags
3460 *
3461 * 57712/578xx related, when skb has encapsulation
3462 */
3463static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3464 u32 *parsing_data, u32 xmit_type)
3465{
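/* Offsets handed to the FW in the parse BD are in 16-bit words, hence
 * the byte offsets derived from skb pointers are shifted right by one.
 */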
3466 *parsing_data |=
3467 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3468 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3469 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3470
3471 if (xmit_type & XMIT_CSUM_TCP) {
3472 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3473 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3474 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3475
3476 return skb_inner_transport_header(skb) +
3477 inner_tcp_hdrlen(skb) - skb->data;
3478 }
3479
3480 /* We support checksum offload for TCP and UDP only.
3481 * No need to pass the UDP header length - it's a constant.
3482 */
3483 return skb_inner_transport_header(skb) +
3484 sizeof(struct udphdr) - skb->data;
3485}
3486
3487/**
3488 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3489 *
3490 * @bp: driver handle
3491 * @skb: packet skb
3492 * @parsing_data: data to be updated
3493 * @xmit_type: xmit flags
3494 *
3495 * 57712/578xx related
3496 */
3497static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3498 u32 *parsing_data, u32 xmit_type)
3499{
3500 *parsing_data |=
3501 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3502 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3503 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3504 
3505 if (xmit_type & XMIT_CSUM_TCP) {
3506 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3507 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3508 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3509 
3510 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3511 }
3512 /* We support checksum offload for TCP and UDP only.
3513 * No need to pass the UDP header length - it's a constant.
3514 */
3515 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3516}
3517
3518/* set FW indication according to inner or outer protocols if tunneled */
3519static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3520 struct eth_tx_start_bd *tx_start_bd,
3521 u32 xmit_type)
3522{
3523 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3524
3525 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3526 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3527
3528 if (!(xmit_type & XMIT_CSUM_TCP))
3529 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3530}
3531
3532/**
3533 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3534 *
3535 * @bp: driver handle
3536 * @skb: packet skb
3537 * @pbd: parse BD to be updated
3538 * @xmit_type: xmit flags
3539 */
3540static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3541 struct eth_tx_parse_bd_e1x *pbd,
3542 u32 xmit_type)
3543{
3544 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3545
3546 /* for now NS flag is not used in Linux */
3547 pbd->global_data =
3548 cpu_to_le16(hlen |
3549 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3550 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3551
3552 pbd->ip_hlen_w = (skb_transport_header(skb) -
3553 skb_network_header(skb)) >> 1;
3554 
3555 hlen += pbd->ip_hlen_w;
3556
3557 /* We support checksum offload for TCP and UDP only */
3558 if (xmit_type & XMIT_CSUM_TCP)
3559 hlen += tcp_hdrlen(skb) / 2;
3560 else
3561 hlen += sizeof(struct udphdr) / 2;
3562
3563 pbd->total_hlen_w = cpu_to_le16(hlen);
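/* hlen was accumulated in 16-bit words for the parse BD; convert it
 * back to bytes before returning it to the caller.
 */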
3564 hlen = hlen*2;
3565
3566 if (xmit_type & XMIT_CSUM_TCP) {
3567 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3568
3569 } else {
3570 s8 fix = SKB_CS_OFF(skb); /* signed! */
3571
3572 DP(NETIF_MSG_TX_QUEUED,
3573 "hlen %d fix %d csum before fix %x\n",
3574 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3575
3576 /* HW bug: fixup the CSUM */
3577 pbd->tcp_pseudo_csum =
3578 bnx2x_csum_fix(skb_transport_header(skb),
3579 SKB_CS(skb), fix);
3580
3581 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3582 pbd->tcp_pseudo_csum);
3583 }
3584
3585 return hlen;
3586}
3587 
3588static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3589 struct eth_tx_parse_bd_e2 *pbd_e2,
3590 struct eth_tx_parse_2nd_bd *pbd2,
3591 u16 *global_data,
3592 u32 xmit_type)
3593{
3594 u16 hlen_w = 0;
3595 u8 outerip_off, outerip_len = 0;
3596 
3597 /* from outer IP to transport */
3598 hlen_w = (skb_inner_transport_header(skb) -
3599 skb_network_header(skb)) >> 1;
3600
3601 /* transport len */
3602 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3603 
3604 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3605 
3606 /* outer IP header info */
3607 if (xmit_type & XMIT_CSUM_V4) {
3608 struct iphdr *iph = ip_hdr(skb);
3609 u32 csum = (__force u32)(~iph->check) -
3610 (__force u32)iph->tot_len -
3611 (__force u32)iph->frag_off;
3612 
3613 pbd2->fw_ip_csum_wo_len_flags_frag =
3614 bswab16(csum_fold((__force __wsum)csum));
3615 } else {
3616 pbd2->fw_ip_hdr_to_payload_w =
3617 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3618 }
3619
3620 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3621
3622 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3623
3624 if (xmit_type & XMIT_GSO_V4) {
3625 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3626
3627 pbd_e2->data.tunnel_data.pseudo_csum =
3628 bswab16(~csum_tcpudp_magic(
3629 inner_ip_hdr(skb)->saddr,
3630 inner_ip_hdr(skb)->daddr,
3631 0, IPPROTO_TCP, 0));
3632
3633 outerip_len = ip_hdr(skb)->ihl << 1;
3634 } else {
3635 pbd_e2->data.tunnel_data.pseudo_csum =
3636 bswab16(~csum_ipv6_magic(
3637 &inner_ipv6_hdr(skb)->saddr,
3638 &inner_ipv6_hdr(skb)->daddr,
3639 0, IPPROTO_TCP, 0));
3640 }
3641
3642 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3643
3644 *global_data |=
3645 outerip_off |
3646 (!!(xmit_type & XMIT_CSUM_V6) <<
3647 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3648 (outerip_len <<
3649 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3650 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3651 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3652
3653 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3654 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3655 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3656 }
3657}
3658
3659/* called with netif_tx_lock
3660 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3661 * netif_wake_queue()
3662 */
3663netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3664{
3665 struct bnx2x *bp = netdev_priv(dev);
3666 
3667 struct netdev_queue *txq;
3668 struct bnx2x_fp_txdata *txdata;
3669 struct sw_tx_bd *tx_buf;
3670 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3671 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3672 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3673 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3674 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3675 u32 pbd_e2_parsing_data = 0;
3676 u16 pkt_prod, bd_prod;
3677 int nbd, txq_index;
3678 dma_addr_t mapping;
3679 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3680 int i;
3681 u8 hlen = 0;
3682 __le16 pkt_size = 0;
3683 struct ethhdr *eth;
3684 u8 mac_type = UNICAST_ADDRESS;
3685
3686#ifdef BNX2X_STOP_ON_ERROR
3687 if (unlikely(bp->panic))
3688 return NETDEV_TX_BUSY;
3689#endif
3690
3691 txq_index = skb_get_queue_mapping(skb);
3692 txq = netdev_get_tx_queue(dev, txq_index);
3693
3694 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3695 
3696 txdata = &bp->bnx2x_txq[txq_index];
3697
3698 /* enable this debug print to view the transmission queue being used
3699 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3700 txq_index, fp_index, txdata_index); */
3701 
3702 /* enable this debug print to view the transmission details
3703 DP(NETIF_MSG_TX_QUEUED,
3704 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3705 txdata->cid, fp_index, txdata_index, txdata, fp); */
3706 
3707 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3708 skb_shinfo(skb)->nr_frags +
3709 BDS_PER_TX_PKT +
3710 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3711 /* Handle special storage cases separately */
3712 if (txdata->tx_ring_size == 0) {
3713 struct bnx2x_eth_q_stats *q_stats =
3714 bnx2x_fp_qstats(bp, txdata->parent_fp);
3715 q_stats->driver_filtered_tx_pkt++;
3716 dev_kfree_skb(skb);
3717 return NETDEV_TX_OK;
3718 }
3719 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3720 netif_tx_stop_queue(txq);
3721 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3722 
3723 return NETDEV_TX_BUSY;
3724 }
3725
3726 DP(NETIF_MSG_TX_QUEUED,
3727 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3728 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3729 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3730 skb->len);
3731
3732 eth = (struct ethhdr *)skb->data;
3733
3734 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3735 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3736 if (is_broadcast_ether_addr(eth->h_dest))
3737 mac_type = BROADCAST_ADDRESS;
3738 else
3739 mac_type = MULTICAST_ADDRESS;
3740 }
3741
3742#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3743 /* First, check if we need to linearize the skb (due to FW
3744 restrictions). No need to check fragmentation if page size > 8K
3745 (there will be no violation to FW restrictions) */
3746 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3747 /* Statistics of linearization */
3748 bp->lin_cnt++;
3749 if (skb_linearize(skb) != 0) {
3750 DP(NETIF_MSG_TX_QUEUED,
3751 "SKB linearization failed - silently dropping this SKB\n");
3752 dev_kfree_skb_any(skb);
3753 return NETDEV_TX_OK;
3754 }
3755 }
3756#endif
3757 /* Map skb linear data for DMA */
3758 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3759 skb_headlen(skb), DMA_TO_DEVICE);
3760 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3761 DP(NETIF_MSG_TX_QUEUED,
3762 "SKB mapping failed - silently dropping this SKB\n");
3763 dev_kfree_skb_any(skb);
3764 return NETDEV_TX_OK;
3765 }
3766 /*
3767 Please read carefully. First we use one BD which we mark as start,
3768 then we have a parsing info BD (used for TSO or xsum),
3769 and only then we have the rest of the TSO BDs.
3770 (don't forget to mark the last one as last,
3771 and to unmap only AFTER you write to the BD ...)
3772 And above all, all pdb sizes are in words - NOT DWORDS!
3773 */
3774
3775 /* get current pkt produced now - advance it just before sending packet
3776 * since mapping of pages may fail and cause packet to be dropped
3777 */
3778 pkt_prod = txdata->tx_pkt_prod;
3779 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3780
3781 /* get a tx_buf and first BD
3782 * tx_start_bd may be changed during SPLIT,
3783 * but first_bd will always stay first
3784 */
3785 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3786 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3787 first_bd = tx_start_bd;
3788
3789 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3790 
3791 /* header nbd: indirectly zero other flags! */
3792 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3793
3794 /* remember the first BD of the packet */
3795 tx_buf->first_bd = txdata->tx_bd_prod;
3796 tx_buf->skb = skb;
3797 tx_buf->flags = 0;
3798
3799 DP(NETIF_MSG_TX_QUEUED,
3800 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3801 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3802 
3803 if (vlan_tx_tag_present(skb)) {
3804 tx_start_bd->vlan_or_ethertype =
3805 cpu_to_le16(vlan_tx_tag_get(skb));
3806 tx_start_bd->bd_flags.as_bitfield |=
3807 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3808 } else {
3809 /* when transmitting in a vf, start bd must hold the ethertype
3810 * for fw to enforce it
3811 */
3812 if (IS_VF(bp))
3813 tx_start_bd->vlan_or_ethertype =
3814 cpu_to_le16(ntohs(eth->h_proto));
3815 else
3816 /* used by FW for packet accounting */
3817 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3818 }
3819 
3820 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
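/* nbd counts every BD consumed by this packet (start BD, parse BD(s),
 * an optional split-header BD and the frag BDs); the final value is
 * written into first_bd->nbd once all fragments are mapped.
 */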
3821
3822 /* turn on parsing and get a BD */
3823 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3824 
3825 if (xmit_type & XMIT_CSUM)
3826 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3827 
3828 if (!CHIP_IS_E1x(bp)) {
3829 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3830 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3831
3832 if (xmit_type & XMIT_CSUM_ENC) {
3833 u16 global_data = 0;
3834
3835 /* Set PBD in enc checksum offload case */
3836 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3837 &pbd_e2_parsing_data,
3838 xmit_type);
3839
3840 /* turn on 2nd parsing and get a BD */
3841 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3842
3843 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3844
3845 memset(pbd2, 0, sizeof(*pbd2));
3846
3847 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3848 (skb_inner_network_header(skb) -
3849 skb->data) >> 1;
3850
3851 if (xmit_type & XMIT_GSO_ENC)
3852 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3853 &global_data,
3854 xmit_type);
3855
3856 pbd2->global_data = cpu_to_le16(global_data);
3857
3858 /* add addition parse BD indication to start BD */
3859 SET_FLAG(tx_start_bd->general_data,
3860 ETH_TX_START_BD_PARSE_NBDS, 1);
3861 /* set encapsulation flag in start BD */
3862 SET_FLAG(tx_start_bd->general_data,
3863 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3864 nbd++;
3865 } else if (xmit_type & XMIT_CSUM) {
3866 /* Set PBD in checksum offload case w/o encapsulation */
3867 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3868 &pbd_e2_parsing_data,
3869 xmit_type);
3870 }
3871 
3872 /* Add the macs to the parsing BD this is a vf */
3873 if (IS_VF(bp)) {
3874 /* override GRE parameters in BD */
3875 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3876 &pbd_e2->data.mac_addr.src_mid,
3877 &pbd_e2->data.mac_addr.src_lo,
3878 eth->h_source);
3879
3880 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3881 &pbd_e2->data.mac_addr.dst_mid,
3882 &pbd_e2->data.mac_addr.dst_lo,
3883 eth->h_dest);
3884 }
3885
3886 SET_FLAG(pbd_e2_parsing_data,
3887 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3888 } else {
3889 u16 global_data = 0;
3890 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3891 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3892 /* Set PBD in checksum offload case */
3893 if (xmit_type & XMIT_CSUM)
3894 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3895 
3896 SET_FLAG(global_data,
3897 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3898 pbd_e1x->global_data |= cpu_to_le16(global_data);
3899 }
3900
3901 /* Setup the data pointer of the first BD of the packet */
3902 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3903 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3904 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3905 pkt_size = tx_start_bd->nbytes;
3906
3907 DP(NETIF_MSG_TX_QUEUED,
3908 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3909 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3910 le16_to_cpu(tx_start_bd->nbytes),
3911 tx_start_bd->bd_flags.as_bitfield,
3912 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3913
3914 if (xmit_type & XMIT_GSO) {
3915
3916 DP(NETIF_MSG_TX_QUEUED,
3917 "TSO packet len %d hlen %d total len %d tso size %d\n",
3918 skb->len, hlen, skb_headlen(skb),
3919 skb_shinfo(skb)->gso_size);
3920
3921 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3922
3923 if (unlikely(skb_headlen(skb) > hlen)) {
3924 nbd++;
3925 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3926 &tx_start_bd, hlen,
3927 bd_prod);
3928 }
3929 if (!CHIP_IS_E1x(bp))
3930 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3931 xmit_type);
3932 else
3933 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3934 }
3935
3936 /* Set the PBD's parsing_data field if not zero
3937 * (for the chips newer than 57711).
3938 */
3939 if (pbd_e2_parsing_data)
3940 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3941
3942 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3943
3944 /* Handle fragmented skb */
3945 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3946 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3947
3948 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3949 skb_frag_size(frag), DMA_TO_DEVICE);
3950 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3951 unsigned int pkts_compl = 0, bytes_compl = 0;
3952 
3953 DP(NETIF_MSG_TX_QUEUED,
3954 "Unable to map page - dropping packet...\n");
3955
3956 /* we need unmap all buffers already mapped
3957 * for this SKB;
3958 * first_bd->nbd need to be properly updated
3959 * before call to bnx2x_free_tx_pkt
3960 */
3961 first_bd->nbd = cpu_to_le16(nbd);
3962 bnx2x_free_tx_pkt(bp, txdata,
3963 TX_BD(txdata->tx_pkt_prod),
3964 &pkts_compl, &bytes_compl);
3965 return NETDEV_TX_OK;
3966 }
3967
9f6c9258 3968 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3969 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3970 if (total_pkt_bd == NULL)
3971 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3972 
3973 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3974 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3975 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3976 le16_add_cpu(&pkt_size, skb_frag_size(frag));
3977 nbd++;
3978
3979 DP(NETIF_MSG_TX_QUEUED,
3980 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3981 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3982 le16_to_cpu(tx_data_bd->nbytes));
3983 }
3984
3985 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3986
3987 /* update with actual num BDs */
3988 first_bd->nbd = cpu_to_le16(nbd);
3989
3990 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3991
3992 /* now send a tx doorbell, counting the next BD
3993 * if the packet contains or ends with it
3994 */
3995 if (TX_BD_POFF(bd_prod) < nbd)
3996 nbd++;
3997
3998 /* total_pkt_bytes should be set on the first data BD if
3999 * it's not an LSO packet and there is more than one
4000 * data BD. In this case pkt_size is limited by an MTU value.
4001 * However, we prefer to set it for an LSO packet (even though we
4002 * don't have to) in order to save some CPU cycles in the non-LSO
4003 * case, where we care much more about them.
4004 */
4005 if (total_pkt_bd != NULL)
4006 total_pkt_bd->total_pkt_bytes = pkt_size;
4007
4008 if (pbd_e1x)
4009 DP(NETIF_MSG_TX_QUEUED,
4010 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4011 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4012 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4013 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4014 le16_to_cpu(pbd_e1x->total_hlen_w));
4015 if (pbd_e2)
4016 DP(NETIF_MSG_TX_QUEUED,
4017 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4018 pbd_e2,
4019 pbd_e2->data.mac_addr.dst_hi,
4020 pbd_e2->data.mac_addr.dst_mid,
4021 pbd_e2->data.mac_addr.dst_lo,
4022 pbd_e2->data.mac_addr.src_hi,
4023 pbd_e2->data.mac_addr.src_mid,
4024 pbd_e2->data.mac_addr.src_lo,
4025 pbd_e2->parsing_data);
4026 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4027
4028 netdev_tx_sent_queue(txq, skb->len);
4029
4030 skb_tx_timestamp(skb);
4031
4032 txdata->tx_pkt_prod++;
4033 /*
4034 * Make sure that the BD data is updated before updating the producer
4035 * since FW might read the BD right after the producer is updated.
4036 * This is only applicable for weak-ordered memory model archs such
4037 * as IA-64. The following barrier is also mandatory since FW will
4038 * assumes packets must have BDs.
4039 */
4040 wmb();
4041
4042 txdata->tx_db.data.prod += nbd;
4043 barrier();
4044 
4045 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4046
4047 mmiowb();
4048
4049 txdata->tx_bd_prod += nbd;
4050 
4051 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4052 netif_tx_stop_queue(txq);
4053
4054 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
4055 * ordering of set_bit() in netif_tx_stop_queue() and read of
4056 * fp->bd_tx_cons */
4057 smp_mb();
4058
4059 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4060 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4061 netif_tx_wake_queue(txq);
4062 }
4063 txdata->tx_pkt++;
4064
4065 return NETDEV_TX_OK;
4066}
4067 
4068/**
4069 * bnx2x_setup_tc - routine to configure net_device for multi tc
4070 *
4071 * @netdev: net device to configure
4072 * @tc: number of traffic classes to enable
4073 *
4074 * callback connected to the ndo_setup_tc function pointer
4075 */
4076int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4077{
4078 int cos, prio, count, offset;
4079 struct bnx2x *bp = netdev_priv(dev);
4080
4081 /* setup tc must be called under rtnl lock */
4082 ASSERT_RTNL();
4083
4084 /* no traffic classes requested. Aborting */
4085 if (!num_tc) {
4086 netdev_reset_tc(dev);
4087 return 0;
4088 }
4089
4090 /* requested to support too many traffic classes */
4091 if (num_tc > bp->max_cos) {
4092 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4093 num_tc, bp->max_cos);
4094 return -EINVAL;
4095 }
4096
4097 /* declare amount of supported traffic classes */
4098 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 4099 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
4100 return -EINVAL;
4101 }
4102
4103 /* configure priority to traffic class mapping */
4104 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4105 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4106 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4107 "mapping priority %d to tc %d\n",
4108 prio, bp->prio_to_cos[prio]);
4109 }
4110
4111 /* Use this configuration to differentiate tc0 from other COSes
4112 This can be used for ets or pfc, and save the effort of setting
4113 up a multi-class queue disc or negotiating DCBX with a switch
4114 netdev_set_prio_tc_map(dev, 0, 0);
4115 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4116 for (prio = 1; prio < 16; prio++) {
4117 netdev_set_prio_tc_map(dev, prio, 1);
4118 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4119 } */
4120
4121 /* configure traffic class to transmission queue mapping */
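/* Each traffic class is mapped to a contiguous block of
 * BNX2X_NUM_ETH_QUEUES(bp) queues, with consecutive classes offset by
 * the number of non-CNIC queues.
 */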
4122 for (cos = 0; cos < bp->max_cos; cos++) {
4123 count = BNX2X_NUM_ETH_QUEUES(bp);
4124 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4125 netdev_set_tc_queue(dev, cos, count, offset);
4126 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4127 "mapping tc %d to offset %d count %d\n",
4128 cos, offset, count);
4129 }
4130
4131 return 0;
4132}
4133
4134/* called with rtnl_lock */
4135int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4136{
4137 struct sockaddr *addr = p;
4138 struct bnx2x *bp = netdev_priv(dev);
4139 int rc = 0;
4140 
4141 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4142 BNX2X_ERR("Requested MAC address is not valid\n");
4143 return -EINVAL;
4144 }
4145 
4146 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4147 !is_zero_ether_addr(addr->sa_data)) {
4148 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4149 return -EINVAL;
4150 }
4151 
4152 if (netif_running(dev)) {
4153 rc = bnx2x_set_eth_mac(bp, false);
4154 if (rc)
4155 return rc;
4156 }
4157
4158 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4159 
4160 if (netif_running(dev))
4161 rc = bnx2x_set_eth_mac(bp, true);
4162 
4163 return rc;
4164}
4165
4166static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4167{
4168 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4169 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4170 u8 cos;
4171
4172 /* Common */
4173 
4174 if (IS_FCOE_IDX(fp_index)) {
4175 memset(sb, 0, sizeof(union host_hc_status_block));
4176 fp->status_blk_mapping = 0;
4177 } else {
4178 /* status blocks */
4179 if (!CHIP_IS_E1x(bp))
4180 BNX2X_PCI_FREE(sb->e2_sb,
4181 bnx2x_fp(bp, fp_index,
4182 status_blk_mapping),
4183 sizeof(struct host_hc_status_block_e2));
4184 else
4185 BNX2X_PCI_FREE(sb->e1x_sb,
4186 bnx2x_fp(bp, fp_index,
4187 status_blk_mapping),
4188 sizeof(struct host_hc_status_block_e1x));
4189 }
4190 
4191 /* Rx */
4192 if (!skip_rx_queue(bp, fp_index)) {
4193 bnx2x_free_rx_bds(fp);
4194
4195 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4196 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4197 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4198 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4199 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4200
4201 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4202 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4203 sizeof(struct eth_fast_path_rx_cqe) *
4204 NUM_RCQ_BD);
4205
4206 /* SGE ring */
4207 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4208 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4209 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4210 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4211 }
4212
4213 /* Tx */
4214 if (!skip_tx_queue(bp, fp_index)) {
4215 /* fastpath tx rings: tx_buf tx_desc */
4216 for_each_cos_in_tx_queue(fp, cos) {
4217 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4218 
4219 DP(NETIF_MSG_IFDOWN,
4220 "freeing tx memory of fp %d cos %d cid %d\n",
4221 fp_index, cos, txdata->cid);
4222
4223 BNX2X_FREE(txdata->tx_buf_ring);
4224 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4225 txdata->tx_desc_mapping,
4226 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4227 }
4228 }
4229 /* end of fastpath */
4230}
4231
4232 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4233{
4234 int i;
4235 for_each_cnic_queue(bp, i)
4236 bnx2x_free_fp_mem_at(bp, i);
4237}
4238
4239void bnx2x_free_fp_mem(struct bnx2x *bp)
4240{
4241 int i;
4242 for_each_eth_queue(bp, i)
4243 bnx2x_free_fp_mem_at(bp, i);
4244}
4245
4246 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4247{
4248 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4249 if (!CHIP_IS_E1x(bp)) {
4250 bnx2x_fp(bp, index, sb_index_values) =
4251 (__le16 *)status_blk.e2_sb->sb.index_values;
4252 bnx2x_fp(bp, index, sb_running_index) =
4253 (__le16 *)status_blk.e2_sb->sb.running_index;
4254 } else {
4255 bnx2x_fp(bp, index, sb_index_values) =
4256 (__le16 *)status_blk.e1x_sb->sb.index_values;
4257 bnx2x_fp(bp, index, sb_running_index) =
4258 (__le16 *)status_blk.e1x_sb->sb.running_index;
4259 }
4260}
4261
4262/* Returns the number of actually allocated BDs */
4263static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4264 int rx_ring_size)
4265{
4266 struct bnx2x *bp = fp->bp;
4267 u16 ring_prod, cqe_ring_prod;
4268 int i, failure_cnt = 0;
4269
4270 fp->rx_comp_cons = 0;
4271 cqe_ring_prod = ring_prod = 0;
4272
4273 /* This routine is called only during fo init so
4274 * fp->eth_q_stats.rx_skb_alloc_failed = 0
4275 */
4276 for (i = 0; i < rx_ring_size; i++) {
4277 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4278 failure_cnt++;
4279 continue;
4280 }
4281 ring_prod = NEXT_RX_IDX(ring_prod);
4282 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4283 WARN_ON(ring_prod <= (i - failure_cnt));
4284 }
4285
4286 if (failure_cnt)
4287 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4288 i - failure_cnt, fp->index);
4289
4290 fp->rx_bd_prod = ring_prod;
4291 /* Limit the CQE producer by the CQE ring size */
4292 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4293 cqe_ring_prod);
4294 fp->rx_pkt = fp->rx_calls = 0;
4295
4296 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4297
4298 return i - failure_cnt;
4299}
4300
4301static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4302{
4303 int i;
4304
4305 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4306 struct eth_rx_cqe_next_page *nextpg;
4307
4308 nextpg = (struct eth_rx_cqe_next_page *)
4309 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4310 nextpg->addr_hi =
4311 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4312 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4313 nextpg->addr_lo =
4314 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4315 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4316 }
4317}
4318
4319static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4320{
4321 union host_hc_status_block *sb;
4322 struct bnx2x_fastpath *fp = &bp->fp[index];
4323 int ring_size = 0;
4324 u8 cos;
4325 int rx_ring_size = 0;
4326 
4327 if (!bp->rx_ring_size &&
4328 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4329 rx_ring_size = MIN_RX_SIZE_NONTPA;
4330 bp->rx_ring_size = rx_ring_size;
4331 } else if (!bp->rx_ring_size) {
4332 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4333
4334 if (CHIP_IS_E3(bp)) {
4335 u32 cfg = SHMEM_RD(bp,
4336 dev_info.port_hw_config[BP_PORT(bp)].
4337 default_cfg);
4338
4339 /* Decrease ring size for 1G functions */
4340 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4341 PORT_HW_CFG_NET_SERDES_IF_SGMII)
4342 rx_ring_size /= 10;
4343 }
4344 
4345 /* allocate at least number of buffers required by FW */
4346 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4347 MIN_RX_SIZE_TPA, rx_ring_size);
4348
4349 bp->rx_ring_size = rx_ring_size;
4350 } else /* if rx_ring_size specified - use it */
4351 rx_ring_size = bp->rx_ring_size;
4352 
4353 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4354
4355 /* Common */
4356 sb = &bnx2x_fp(bp, index, status_blk);
4357 
4358 if (!IS_FCOE_IDX(index)) {
4359 /* status blocks */
4360 if (!CHIP_IS_E1x(bp))
4361 BNX2X_PCI_ALLOC(sb->e2_sb,
4362 &bnx2x_fp(bp, index, status_blk_mapping),
4363 sizeof(struct host_hc_status_block_e2));
4364 else
4365 BNX2X_PCI_ALLOC(sb->e1x_sb,
4366 &bnx2x_fp(bp, index, status_blk_mapping),
4367 sizeof(struct host_hc_status_block_e1x));
4368 }
4369
4370 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4371 * set shortcuts for it.
4372 */
4373 if (!IS_FCOE_IDX(index))
4374 set_sb_shortcuts(bp, index);
4375
4376 /* Tx */
4377 if (!skip_tx_queue(bp, index)) {
4378 /* fastpath tx rings: tx_buf tx_desc */
4379 for_each_cos_in_tx_queue(fp, cos) {
4380 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4381 
4382 DP(NETIF_MSG_IFUP,
4383 "allocating tx memory of fp %d cos %d\n",
4384 index, cos);
4385
4386 BNX2X_ALLOC(txdata->tx_buf_ring,
4387 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4388 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4389 &txdata->tx_desc_mapping,
4390 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4391 }
4392 }
4393
4394 /* Rx */
4395 if (!skip_rx_queue(bp, index)) {
4396 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4397 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4398 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4399 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4400 &bnx2x_fp(bp, index, rx_desc_mapping),
4401 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4402
4403 /* Seed all CQEs by 1s */
4404 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4405 &bnx2x_fp(bp, index, rx_comp_mapping),
4406 sizeof(struct eth_fast_path_rx_cqe) *
4407 NUM_RCQ_BD);
4408
4409 /* SGE ring */
4410 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4411 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4412 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4413 &bnx2x_fp(bp, index, rx_sge_mapping),
4414 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4415 /* RX BD ring */
4416 bnx2x_set_next_page_rx_bd(fp);
4417
4418 /* CQ ring */
4419 bnx2x_set_next_page_rx_cq(fp);
4420
4421 /* BDs */
4422 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4423 if (ring_size < rx_ring_size)
4424 goto alloc_mem_err;
4425 }
4426
4427 return 0;
4428
4429/* handles low memory cases */
4430alloc_mem_err:
4431 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4432 index, ring_size);
4433 /* FW will drop all packets if queue is not big enough,
4434 * In these cases we disable the queue
6383c0b3 4435 * Min size is different for OOO, TPA and non-TPA queues
4436 */
4437 if (ring_size < (fp->disable_tpa ?
4438 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4439 /* release memory allocated for this queue */
4440 bnx2x_free_fp_mem_at(bp, index);
4441 return -ENOMEM;
4442 }
4443 return 0;
4444}
4445
4446 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4447{
4448 if (!NO_FCOE(bp))
4449 /* FCoE */
4450 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4451 /* we will fail load process instead of mark
4452 * NO_FCOE_FLAG
4453 */
4454 return -ENOMEM;
4455
4456 return 0;
4457}
4458
4459 static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4460{
4461 int i;
4462
4463 /* 1. Allocate FP for leading - fatal if error
4464 * 2. Allocate RSS - fix number of queues if error
4465 */
4466
4467 /* leading */
4468 if (bnx2x_alloc_fp_mem_at(bp, 0))
4469 return -ENOMEM;
4470 
4471 /* RSS */
4472 for_each_nondefault_eth_queue(bp, i)
4473 if (bnx2x_alloc_fp_mem_at(bp, i))
4474 break;
4475
4476 /* handle memory failures */
4477 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4478 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4479
4480 WARN_ON(delta < 0);
4481 bnx2x_shrink_eth_fp(bp, delta);
4482 if (CNIC_SUPPORT(bp))
4483 /* move non eth FPs next to last eth FP
4484 * must be done in that order
4485 * FCOE_IDX < FWD_IDX < OOO_IDX
4486 */
4487 
4488 /* move FCoE fp even NO_FCOE_FLAG is on */
4489 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4490 bp->num_ethernet_queues -= delta;
4491 bp->num_queues = bp->num_ethernet_queues +
4492 bp->num_cnic_queues;
4493 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4494 bp->num_queues + delta, bp->num_queues);
4495 }
4496
4497 return 0;
4498}
4499 
4500void bnx2x_free_mem_bp(struct bnx2x *bp)
4501{
4502 int i;
4503
4504 for (i = 0; i < bp->fp_array_size; i++)
4505 kfree(bp->fp[i].tpa_info);
4506 kfree(bp->fp);
4507 kfree(bp->sp_objs);
4508 kfree(bp->fp_stats);
4509 kfree(bp->bnx2x_txq);
4510 kfree(bp->msix_table);
4511 kfree(bp->ilt);
4512}
4513
4514 int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4515{
4516 struct bnx2x_fastpath *fp;
4517 struct msix_entry *tbl;
4518 struct bnx2x_ilt *ilt;
4519 int msix_table_size = 0;
4520 int fp_array_size, txq_array_size;
4521 int i;
4522
4523 /*
4524 * The biggest MSI-X table we might need is as a maximum number of fast
2de67439 4525 * path IGU SBs plus default SB (for PF only).
6383c0b3 4526 */
1ab4434c
AE
4527 msix_table_size = bp->igu_sb_cnt;
4528 if (IS_PF(bp))
4529 msix_table_size++;
4530 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4531 
4532 /* fp array: RSS plus CNIC related L2 queues */
4533 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4534 bp->fp_array_size = fp_array_size;
4535 BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4536 
4537 fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4538 if (!fp)
4539 goto alloc_err;
4540 for (i = 0; i < bp->fp_array_size; i++) {
4541 fp[i].tpa_info =
4542 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4543 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4544 if (!(fp[i].tpa_info))
4545 goto alloc_err;
4546 }
4547
4548 bp->fp = fp;
4549
4550 /* allocate sp objs */
4551 bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4552 GFP_KERNEL);
4553 if (!bp->sp_objs)
4554 goto alloc_err;
4555
4556 /* allocate fp_stats */
4557 bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4558 GFP_KERNEL);
4559 if (!bp->fp_stats)
4560 goto alloc_err;
4561
4562 /* Allocate memory for the transmission queues array */
4563 txq_array_size =
4564 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4565 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4566
4567 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4568 GFP_KERNEL);
4569 if (!bp->bnx2x_txq)
4570 goto alloc_err;
4571
4572 /* msix table */
4573 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4574 if (!tbl)
4575 goto alloc_err;
4576 bp->msix_table = tbl;
4577
4578 /* ilt */
4579 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4580 if (!ilt)
4581 goto alloc_err;
4582 bp->ilt = ilt;
4583
4584 return 0;
4585alloc_err:
4586 bnx2x_free_mem_bp(bp);
4587 return -ENOMEM;
4588}
4589
4590 int bnx2x_reload_if_running(struct net_device *dev)
4591{
4592 struct bnx2x *bp = netdev_priv(dev);
4593
4594 if (unlikely(!netif_running(dev)))
4595 return 0;
4596
4597 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4598 return bnx2x_nic_load(bp, LOAD_NORMAL);
4599}
4600
4601int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4602{
4603 u32 sel_phy_idx = 0;
4604 if (bp->link_params.num_phys <= 1)
4605 return INT_PHY;
4606
4607 if (bp->link_vars.link_up) {
4608 sel_phy_idx = EXT_PHY1;
4609 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4610 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4611 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4612 sel_phy_idx = EXT_PHY2;
4613 } else {
4614
4615 switch (bnx2x_phy_selection(&bp->link_params)) {
4616 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4617 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4618 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4619 sel_phy_idx = EXT_PHY1;
4620 break;
4621 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4622 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4623 sel_phy_idx = EXT_PHY2;
4624 break;
4625 }
4626 }
4627
4628 return sel_phy_idx;
4629}
4630int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4631{
4632 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4633 /*
4634 * The selected activated PHY is always after swapping (in case PHY
4635 * swapping is enabled). So when swapping is enabled, we need to reverse
4636 * the configuration
4637 */
4638
4639 if (bp->link_params.multi_phy_config &
4640 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4641 if (sel_phy_idx == EXT_PHY1)
4642 sel_phy_idx = EXT_PHY2;
4643 else if (sel_phy_idx == EXT_PHY2)
4644 sel_phy_idx = EXT_PHY1;
4645 }
4646 return LINK_CONFIG_IDX(sel_phy_idx);
4647}
4648
4649#ifdef NETDEV_FCOE_WWNN
4650int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4651{
4652 struct bnx2x *bp = netdev_priv(dev);
4653 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4654
4655 switch (type) {
4656 case NETDEV_FCOE_WWNN:
4657 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4658 cp->fcoe_wwn_node_name_lo);
4659 break;
4660 case NETDEV_FCOE_WWPN:
4661 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4662 cp->fcoe_wwn_port_name_lo);
4663 break;
4664 default:
4665 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4666 return -EINVAL;
4667 }
4668
4669 return 0;
4670}
4671#endif
4672
4673/* called with rtnl_lock */
4674int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4675{
4676 struct bnx2x *bp = netdev_priv(dev);
4677
4678 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4679 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4680 return -EAGAIN;
4681 }
4682
4683 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
4684 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4685 BNX2X_ERR("Can't support requested MTU size\n");
4686 return -EINVAL;
4687 }
4688
4689 /* This does not race with packet allocation
4690 * because the actual alloc size is
4691 * only updated as part of load
4692 */
4693 dev->mtu = new_mtu;
4694
4695 return bnx2x_reload_if_running(dev);
4696}
4697
4698 netdev_features_t bnx2x_fix_features(struct net_device *dev,
4699 netdev_features_t features)
4700{
4701 struct bnx2x *bp = netdev_priv(dev);
4702
4703 /* TPA requires Rx CSUM offloading */
4704 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
4705 features &= ~NETIF_F_LRO;
4706 features &= ~NETIF_F_GRO;
4707 }
4708
4709 return features;
4710}
4711
4712 int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4713{
4714 struct bnx2x *bp = netdev_priv(dev);
4715 u32 flags = bp->flags;
4716 u32 changes;
4717 bool bnx2x_reload = false;
4718
4719 if (features & NETIF_F_LRO)
4720 flags |= TPA_ENABLE_FLAG;
4721 else
4722 flags &= ~TPA_ENABLE_FLAG;
4723
4724 if (features & NETIF_F_GRO)
4725 flags |= GRO_ENABLE_FLAG;
4726 else
4727 flags &= ~GRO_ENABLE_FLAG;
4728
4729 if (features & NETIF_F_LOOPBACK) {
4730 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4731 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4732 bnx2x_reload = true;
4733 }
4734 } else {
4735 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4736 bp->link_params.loopback_mode = LOOPBACK_NONE;
4737 bnx2x_reload = true;
4738 }
4739 }
4740
4741 changes = flags ^ bp->flags;
4742
4743 /* if GRO is changed while LRO is enabled, don't force a reload */
4744 if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
4745 changes &= ~GRO_ENABLE_FLAG;
4746
4747 if (changes)
4748 bnx2x_reload = true;
4749
4750 bp->flags = flags;
4751 
4752 if (bnx2x_reload) {
4753 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4754 return bnx2x_reload_if_running(dev);
4755 /* else: bnx2x_nic_load() will be called at end of recovery */
4756 }
4757
4758 return 0;
4759}
4760
4761void bnx2x_tx_timeout(struct net_device *dev)
4762{
4763 struct bnx2x *bp = netdev_priv(dev);
4764
4765#ifdef BNX2X_STOP_ON_ERROR
4766 if (!bp->panic)
4767 bnx2x_panic();
4768#endif
4769 
4770 /* This allows the netif to be shutdown gracefully before resetting */
4771 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4772}
4773
4774int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4775{
4776 struct net_device *dev = pci_get_drvdata(pdev);
4777 struct bnx2x *bp;
4778
4779 if (!dev) {
4780 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4781 return -ENODEV;
4782 }
4783 bp = netdev_priv(dev);
4784
4785 rtnl_lock();
4786
4787 pci_save_state(pdev);
4788
4789 if (!netif_running(dev)) {
4790 rtnl_unlock();
4791 return 0;
4792 }
4793
4794 netif_device_detach(dev);
4795
4796 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
4797
4798 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4799
4800 rtnl_unlock();
4801
4802 return 0;
4803}
4804
4805int bnx2x_resume(struct pci_dev *pdev)
4806{
4807 struct net_device *dev = pci_get_drvdata(pdev);
4808 struct bnx2x *bp;
4809 int rc;
4810
4811 if (!dev) {
4812 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4813 return -ENODEV;
4814 }
4815 bp = netdev_priv(dev);
4816
4817 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4818 BNX2X_ERR("Handling parity error recovery. Try again later\n");
4819 return -EAGAIN;
4820 }
4821
4822 rtnl_lock();
4823
4824 pci_restore_state(pdev);
4825
4826 if (!netif_running(dev)) {
4827 rtnl_unlock();
4828 return 0;
4829 }
4830
4831 bnx2x_set_power_state(bp, PCI_D0);
4832 netif_device_attach(dev);
4833
4834 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4835
4836 rtnl_unlock();
4837
4838 return rc;
4839}
4840 
4841void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4842 u32 cid)
4843{
4844 if (!cxt) {
4845 BNX2X_ERR("bad context pointer %p\n", cxt);
4846 return;
4847 }
4848
4849 /* ustorm cxt validation */
4850 cxt->ustorm_ag_context.cdu_usage =
4851 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4852 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4853 /* xcontext validation */
4854 cxt->xstorm_ag_context.cdu_reserved =
4855 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4856 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4857}
4858
4859static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4860 u8 fw_sb_id, u8 sb_index,
4861 u8 ticks)
4862 {
4863 u32 addr = BAR_CSTRORM_INTMEM +
4864 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4865 REG_WR8(bp, addr, ticks);
4866 DP(NETIF_MSG_IFUP,
4867 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4868 port, fw_sb_id, sb_index, ticks);
4869}
4870
4871static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4872 u16 fw_sb_id, u8 sb_index,
4873 u8 disable)
4874{
4875 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4876 u32 addr = BAR_CSTRORM_INTMEM +
4877 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4878 u8 flags = REG_RD8(bp, addr);
4879 /* clear and set */
4880 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4881 flags |= enable_flag;
4882 REG_WR8(bp, addr, flags);
4883 DP(NETIF_MSG_IFUP,
4884 "port %x fw_sb_id %d sb_index %d disable %d\n",
4885 port, fw_sb_id, sb_index, disable);
4886}
4887
4888void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4889 u8 sb_index, u8 disable, u16 usec)
4890{
4891 int port = BP_PORT(bp);
4892 u8 ticks = usec / BNX2X_BTR;
4893
4894 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4895
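/* The timeout is programmed in BNX2X_BTR-microsecond ticks; a zero
 * usec value implicitly disables coalescing for this status block index.
 */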
4896 disable = disable ? 1 : (usec ? 0 : 1);
4897 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4898}
4899
4900void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
4901 u32 verbose)
4902{
4903 smp_mb__before_clear_bit();
4904 set_bit(flag, &bp->sp_rtnl_state);
4905 smp_mb__after_clear_bit();
4906 DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
4907 flag);
4908 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4909}
4910EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);