bnx2x: Support ndo_set_rxmode in VF driver
[deliverable/linux.git] drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
9f6c9258
DK
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
85b26ea1 3 * Copyright (c) 2007-2012 Broadcom Corporation
9f6c9258
DK
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
f1deab50
JP
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
9f6c9258 20#include <linux/etherdevice.h>
9bcc0893 21#include <linux/if_vlan.h>
a6b7a407 22#include <linux/interrupt.h>
9f6c9258 23#include <linux/ip.h>
f2e0899f 24#include <net/ipv6.h>
7f3e01fe 25#include <net/ip6_checksum.h>
c0cba59e 26#include <linux/prefetch.h>
9f6c9258 27#include "bnx2x_cmn.h"
523224a3 28#include "bnx2x_init.h"
042181f5 29#include "bnx2x_sp.h"
523224a3 30
619c5cb6 31
9f6c9258 32
b3b83c3f
DK
33/**
34 * bnx2x_move_fp - move content of the fastpath structure.
35 *
36 * @bp: driver handle
37 * @from: source FP index
38 * @to: destination FP index
39 *
40 * Makes sure the contents of the bp->fp[to].napi are kept
72754080
AE
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then mem copying the entire
65565884
MS
43 * source onto the target. Update txdata pointers and related
44 * content.
b3b83c3f
DK
45 */
46static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
47{
48 struct bnx2x_fastpath *from_fp = &bp->fp[from];
49 struct bnx2x_fastpath *to_fp = &bp->fp[to];
15192a8c
BW
50 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
51 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
52 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
65565884
MS
54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0;
72754080
AE
56
57 /* Copy the NAPI object as it has been already initialized */
58 from_fp->napi = to_fp->napi;
59
b3b83c3f
DK
60 /* Move bnx2x_fastpath contents */
61 memcpy(to_fp, from_fp, sizeof(*to_fp));
62 to_fp->index = to;
65565884 63
15192a8c
BW
64 /* move sp_objs contents as well, as their indices match fp ones */
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
66
67 /* move fp_stats contents as well, as their indices match fp ones */
68 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
69
65565884
MS
70 /* Update txdata pointers in fp and move txdata content accordingly:
71 * Each fp consumes 'max_cos' txdata structures, so the index should be
72 * decremented by max_cos x delta.
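 *
 * Illustrative arithmetic (values assumed, not taken from a real config):
 * with BNX2X_NUM_ETH_QUEUES(bp) == 8, max_cos == 3, from == FCOE_IDX(bp) == 8
 * and to == 4, the FCoE txdata entry moves from index
 * 8 * 3 + FCOE_TXQ_IDX_OFFSET to (8 - 8 + 4) * 3 + FCOE_TXQ_IDX_OFFSET.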
73 */
74
75 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
76 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
77 (bp)->max_cos;
78 if (from == FCOE_IDX(bp)) {
79 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
80 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
81 }
82
83 memcpy(&bp->bnx2x_txq[old_txdata_index],
84 &bp->bnx2x_txq[new_txdata_index],
85 sizeof(struct bnx2x_fp_txdata));
86 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
b3b83c3f
DK
87}
88
619c5cb6
VZ
89int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
90
9f6c9258
DK
91/* free skb in the packet ring at pos idx
92 * return idx of last bd freed
93 */
6383c0b3 94static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
2df1a70a
TH
95 u16 idx, unsigned int *pkts_compl,
96 unsigned int *bytes_compl)
9f6c9258 97{
6383c0b3 98 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
9f6c9258
DK
99 struct eth_tx_start_bd *tx_start_bd;
100 struct eth_tx_bd *tx_data_bd;
101 struct sk_buff *skb = tx_buf->skb;
102 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
103 int nbd;
104
105 /* prefetch skb end pointer to speed up dev_kfree_skb() */
106 prefetch(&skb->end);
107
51c1a580 108 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 109 txdata->txq_index, idx, tx_buf, skb);
9f6c9258
DK
110
111 /* unmap first bd */
6383c0b3 112 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
9f6c9258 113 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
4bca60f4 114 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
9f6c9258 115
619c5cb6 116
9f6c9258
DK
117 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
118#ifdef BNX2X_STOP_ON_ERROR
119 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
120 BNX2X_ERR("BAD nbd!\n");
121 bnx2x_panic();
122 }
123#endif
124 new_cons = nbd + tx_buf->first_bd;
125
126 /* Get the next bd */
127 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
128
129 /* Skip a parse bd... */
130 --nbd;
131 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
132
133 /* ...and the TSO split header bd since they have no mapping */
134 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
135 --nbd;
136 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
137 }
138
139 /* now free frags */
140 while (nbd > 0) {
141
6383c0b3 142 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
9f6c9258
DK
143 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
144 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
145 if (--nbd)
146 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
147 }
148
149 /* release skb */
150 WARN_ON(!skb);
d8290ae5 151 if (likely(skb)) {
2df1a70a
TH
152 (*pkts_compl)++;
153 (*bytes_compl) += skb->len;
154 }
d8290ae5 155
40955532 156 dev_kfree_skb_any(skb);
9f6c9258
DK
157 tx_buf->first_bd = 0;
158 tx_buf->skb = NULL;
159
160 return new_cons;
161}
162
6383c0b3 163int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 164{
9f6c9258 165 struct netdev_queue *txq;
6383c0b3 166 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
2df1a70a 167 unsigned int pkts_compl = 0, bytes_compl = 0;
9f6c9258
DK
168
169#ifdef BNX2X_STOP_ON_ERROR
170 if (unlikely(bp->panic))
171 return -1;
172#endif
173
6383c0b3
AE
174 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
175 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
176 sw_cons = txdata->tx_pkt_cons;
9f6c9258
DK
177
178 while (sw_cons != hw_cons) {
179 u16 pkt_cons;
180
181 pkt_cons = TX_BD(sw_cons);
182
51c1a580
MS
183 DP(NETIF_MSG_TX_DONE,
184 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
6383c0b3 185 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
9f6c9258 186
2df1a70a
TH
187 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
188 &pkts_compl, &bytes_compl);
189
9f6c9258
DK
190 sw_cons++;
191 }
192
2df1a70a
TH
193 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
194
6383c0b3
AE
195 txdata->tx_pkt_cons = sw_cons;
196 txdata->tx_bd_cons = bd_cons;
9f6c9258
DK
197
198 /* Need to make the tx_bd_cons update visible to start_xmit()
199 * before checking for netif_tx_queue_stopped(). Without the
200 * memory barrier, there is a small possibility that
201 * start_xmit() will miss it and cause the queue to be stopped
202 * forever.
619c5cb6
VZ
203 * On the other hand we need an rmb() here to ensure the proper
204 * ordering of bit testing in the following
205 * netif_tx_queue_stopped(txq) call.
9f6c9258
DK
206 */
207 smp_mb();
208
9f6c9258
DK
209 if (unlikely(netif_tx_queue_stopped(txq))) {
210 /* Taking tx_lock() is needed to prevent reenabling the queue
211 * while it's empty. This could have happened if rx_action() gets
212 * suspended in bnx2x_tx_int() after the condition before
213 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
214 *
215 * stops the queue->sees fresh tx_bd_cons->releases the queue->
216 * sends some packets consuming the whole queue again->
217 * stops the queue
218 */
219
220 __netif_tx_lock(txq, smp_processor_id());
221
222 if ((netif_tx_queue_stopped(txq)) &&
223 (bp->state == BNX2X_STATE_OPEN) &&
7df2dc6b 224 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
9f6c9258
DK
225 netif_tx_wake_queue(txq);
226
227 __netif_tx_unlock(txq);
228 }
229 return 0;
230}
231
232static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
233 u16 idx)
234{
235 u16 last_max = fp->last_max_sge;
236
237 if (SUB_S16(idx, last_max) > 0)
238 fp->last_max_sge = idx;
239}
240
621b4d66
DK
241static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
242 u16 sge_len,
243 struct eth_end_agg_rx_cqe *cqe)
9f6c9258
DK
244{
245 struct bnx2x *bp = fp->bp;
9f6c9258
DK
246 u16 last_max, last_elem, first_elem;
247 u16 delta = 0;
248 u16 i;
249
250 if (!sge_len)
251 return;
252
253 /* First mark all used pages */
254 for (i = 0; i < sge_len; i++)
619c5cb6 255 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
621b4d66 256 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
9f6c9258
DK
257
258 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
621b4d66 259 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
260
261 /* Here we assume that the last SGE index is the biggest */
262 prefetch((void *)(fp->sge_mask));
523224a3 263 bnx2x_update_last_max_sge(fp,
621b4d66 264 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
265
266 last_max = RX_SGE(fp->last_max_sge);
619c5cb6
VZ
267 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
268 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
9f6c9258
DK
269
270 /* If ring is not full */
271 if (last_elem + 1 != first_elem)
272 last_elem++;
273
274 /* Now update the prod */
275 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
276 if (likely(fp->sge_mask[i]))
277 break;
278
619c5cb6
VZ
279 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
280 delta += BIT_VEC64_ELEM_SZ;
9f6c9258
DK
281 }
282
283 if (delta > 0) {
284 fp->rx_sge_prod += delta;
285 /* clear page-end entries */
286 bnx2x_clear_sge_mask_next_elems(fp);
287 }
288
289 DP(NETIF_MSG_RX_STATUS,
290 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
291 fp->last_max_sge, fp->rx_sge_prod);
292}
293
e52fcb24
ED
294/* Set Toeplitz hash value in the skb using the value from the
295 * CQE (calculated by HW).
296 */
297static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
a334b5fb
ED
298 const struct eth_fast_path_rx_cqe *cqe,
299 bool *l4_rxhash)
e52fcb24
ED
300{
301 /* Set Toeplitz hash from CQE */
302 if ((bp->dev->features & NETIF_F_RXHASH) &&
a334b5fb
ED
303 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
304 enum eth_rss_hash_type htype;
305
306 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
307 *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
308 (htype == TCP_IPV6_HASH_TYPE);
e52fcb24 309 return le32_to_cpu(cqe->rss_hash_result);
a334b5fb
ED
310 }
311 *l4_rxhash = false;
e52fcb24
ED
312 return 0;
313}
314
9f6c9258 315static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
e52fcb24 316 u16 cons, u16 prod,
619c5cb6 317 struct eth_fast_path_rx_cqe *cqe)
9f6c9258
DK
318{
319 struct bnx2x *bp = fp->bp;
320 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
321 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
322 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
323 dma_addr_t mapping;
619c5cb6
VZ
324 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
325 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 326
619c5cb6
VZ
327 /* print error if current state != stop */
328 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
9f6c9258
DK
329 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
330
e52fcb24 331 /* Try to map an empty data buffer from the aggregation info */
619c5cb6 332 mapping = dma_map_single(&bp->pdev->dev,
e52fcb24 333 first_buf->data + NET_SKB_PAD,
619c5cb6
VZ
334 fp->rx_buf_size, DMA_FROM_DEVICE);
335 /*
336 * ...if it fails - move the skb from the consumer to the producer
337 * and set the current aggregation state as ERROR to drop it
338 * when TPA_STOP arrives.
339 */
340
341 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
342 /* Move the BD from the consumer to the producer */
e52fcb24 343 bnx2x_reuse_rx_data(fp, cons, prod);
619c5cb6
VZ
344 tpa_info->tpa_state = BNX2X_TPA_ERROR;
345 return;
346 }
9f6c9258 347
e52fcb24
ED
348 /* move empty data from pool to prod */
349 prod_rx_buf->data = first_buf->data;
619c5cb6 350 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 351 /* point prod_bd to new data */
9f6c9258
DK
352 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
353 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
354
619c5cb6
VZ
355 /* move partial skb from cons to pool (don't unmap yet) */
356 *first_buf = *cons_rx_buf;
357
358 /* mark bin state as START */
359 tpa_info->parsing_flags =
360 le16_to_cpu(cqe->pars_flags.flags);
361 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
362 tpa_info->tpa_state = BNX2X_TPA_START;
363 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
364 tpa_info->placement_offset = cqe->placement_offset;
a334b5fb 365 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
621b4d66
DK
366 if (fp->mode == TPA_MODE_GRO) {
367 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
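 /* full_page: the largest multiple of gro_size that fits in one SGE
  * buffer (SGE_PAGE_SIZE * PAGES_PER_SGE); the integer division below
  * rounds down.
  */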
368 tpa_info->full_page =
369 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
370 tpa_info->gro_size = gro_size;
371 }
619c5cb6 372
9f6c9258
DK
373#ifdef BNX2X_STOP_ON_ERROR
374 fp->tpa_queue_used |= (1 << queue);
375#ifdef _ASM_GENERIC_INT_L64_H
376 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
377#else
378 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
379#endif
380 fp->tpa_queue_used);
381#endif
382}
383
e4e3c02a
VZ
384/* Timestamp option length allowed for TPA aggregation:
385 *
386 * nop nop kind length echo val
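 * (2 NOP bytes + 1 kind + 1 length + 4 TS value + 4 TS echo reply = 12 bytes)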
387 */
388#define TPA_TSTAMP_OPT_LEN 12
389/**
e8920674 390 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
e4e3c02a 391 *
e8920674
DK
392 * @bp: driver handle
393 * @parsing_flags: parsing flags from the START CQE
394 * @len_on_bd: total length of the first packet for the
395 * aggregation.
396 *
397 * Approximate value of the MSS for this aggregation, calculated using
398 * its first packet.
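 *
 * Worked example (assuming 14-byte Ethernet, 20-byte IPv4 and 20-byte TCP
 * headers, no timestamp option): mss ~= len_on_bd - 54; a TCP timestamp
 * option subtracts another 12 bytes.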
e4e3c02a 399 */
1191cb83
ED
400static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
401 u16 len_on_bd)
e4e3c02a 402{
619c5cb6
VZ
403 /*
404 * TPA aggregation won't have either IP options or TCP options
405 * other than timestamp or IPv6 extension headers.
e4e3c02a 406 */
619c5cb6
VZ
407 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
408
409 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
410 PRS_FLAG_OVERETH_IPV6)
411 hdrs_len += sizeof(struct ipv6hdr);
412 else /* IPv4 */
413 hdrs_len += sizeof(struct iphdr);
e4e3c02a
VZ
414
415
416 /* Check if there was a TCP timestamp; if there is, it will
417 * always be 12 bytes long: nop nop kind length echo val.
418 *
419 * Otherwise FW would close the aggregation.
420 */
421 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
422 hdrs_len += TPA_TSTAMP_OPT_LEN;
423
424 return len_on_bd - hdrs_len;
425}
426
1191cb83
ED
427static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
428 struct bnx2x_fastpath *fp, u16 index)
429{
430 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
431 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
432 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
433 dma_addr_t mapping;
434
435 if (unlikely(page == NULL)) {
436 BNX2X_ERR("Can't alloc sge\n");
437 return -ENOMEM;
438 }
439
440 mapping = dma_map_page(&bp->pdev->dev, page, 0,
441 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
442 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
443 __free_pages(page, PAGES_PER_SGE_SHIFT);
444 BNX2X_ERR("Can't map sge\n");
445 return -ENOMEM;
446 }
447
448 sw_buf->page = page;
449 dma_unmap_addr_set(sw_buf, mapping, mapping);
450
451 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
452 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
453
454 return 0;
455}
456
9f6c9258 457static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
621b4d66
DK
458 struct bnx2x_agg_info *tpa_info,
459 u16 pages,
460 struct sk_buff *skb,
619c5cb6
VZ
461 struct eth_end_agg_rx_cqe *cqe,
462 u16 cqe_idx)
9f6c9258
DK
463{
464 struct sw_rx_page *rx_pg, old_rx_pg;
621b4d66
DK
465 u32 i, frag_len, frag_size;
466 int err, j, frag_id = 0;
619c5cb6 467 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 468 u16 full_page = 0, gro_size = 0;
9f6c9258 469
619c5cb6 470 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
621b4d66
DK
471
472 if (fp->mode == TPA_MODE_GRO) {
473 gro_size = tpa_info->gro_size;
474 full_page = tpa_info->full_page;
475 }
9f6c9258
DK
476
477 /* This is needed in order to enable forwarding support */
621b4d66 478 if (frag_size) {
619c5cb6
VZ
479 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
480 tpa_info->parsing_flags, len_on_bd);
9f6c9258 481
621b4d66
DK
482 /* set for GRO */
483 if (fp->mode == TPA_MODE_GRO)
484 skb_shinfo(skb)->gso_type =
485 (GET_FLAG(tpa_info->parsing_flags,
486 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
487 PRS_FLAG_OVERETH_IPV6) ?
488 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
489 }
490
491
9f6c9258
DK
492#ifdef BNX2X_STOP_ON_ERROR
493 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
494 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
495 pages, cqe_idx);
619c5cb6 496 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
497 bnx2x_panic();
498 return -EINVAL;
499 }
500#endif
501
502 /* Run through the SGL and compose the fragmented skb */
503 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 504 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
505
506 /* FW gives the indices of the SGE as if the ring is an array
507 (meaning that "next" element will consume 2 indices) */
621b4d66
DK
508 if (fp->mode == TPA_MODE_GRO)
509 frag_len = min_t(u32, frag_size, (u32)full_page);
510 else /* LRO */
511 frag_len = min_t(u32, frag_size,
512 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
513
9f6c9258
DK
514 rx_pg = &fp->rx_page_ring[sge_idx];
515 old_rx_pg = *rx_pg;
516
517 /* If we fail to allocate a substitute page, we simply stop
518 where we are and drop the whole packet */
519 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
520 if (unlikely(err)) {
15192a8c 521 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
522 return err;
523 }
524
525 /* Unmap the page as we are going to pass it to the stack */
526 dma_unmap_page(&bp->pdev->dev,
527 dma_unmap_addr(&old_rx_pg, mapping),
528 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
9f6c9258 529 /* Add one frag and update the appropriate fields in the skb */
621b4d66
DK
530 if (fp->mode == TPA_MODE_LRO)
531 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
532 else { /* GRO */
533 int rem;
534 int offset = 0;
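 /* Split the SGE page into gro_size-sized frags; every frag after the
  * first takes an extra reference on the page (get_page) since they all
  * share it.
  */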
535 for (rem = frag_len; rem > 0; rem -= gro_size) {
536 int len = rem > gro_size ? gro_size : rem;
537 skb_fill_page_desc(skb, frag_id++,
538 old_rx_pg.page, offset, len);
539 if (offset)
540 get_page(old_rx_pg.page);
541 offset += len;
542 }
543 }
9f6c9258
DK
544
545 skb->data_len += frag_len;
e1ac50f6 546 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
9f6c9258
DK
547 skb->len += frag_len;
548
549 frag_size -= frag_len;
550 }
551
552 return 0;
553}
554
d46d132c
ED
555static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
556{
557 if (fp->rx_frag_size)
558 put_page(virt_to_head_page(data));
559 else
560 kfree(data);
561}
562
563static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
564{
565 if (fp->rx_frag_size)
566 return netdev_alloc_frag(fp->rx_frag_size);
567
568 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
569}
570
571
1191cb83
ED
572static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
573 struct bnx2x_agg_info *tpa_info,
574 u16 pages,
575 struct eth_end_agg_rx_cqe *cqe,
576 u16 cqe_idx)
9f6c9258 577{
619c5cb6 578 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 579 u8 pad = tpa_info->placement_offset;
619c5cb6 580 u16 len = tpa_info->len_on_bd;
e52fcb24 581 struct sk_buff *skb = NULL;
621b4d66 582 u8 *new_data, *data = rx_buf->data;
619c5cb6
VZ
583 u8 old_tpa_state = tpa_info->tpa_state;
584
585 tpa_info->tpa_state = BNX2X_TPA_STOP;
586
587 /* If there was an error during the handling of the TPA_START -
588 * drop this aggregation.
589 */
590 if (old_tpa_state == BNX2X_TPA_ERROR)
591 goto drop;
592
e52fcb24 593 /* Try to allocate the new data */
d46d132c 594 new_data = bnx2x_frag_alloc(fp);
9f6c9258
DK
595 /* Unmap skb in the pool anyway, as we are going to change
596 pool entry status to BNX2X_TPA_STOP even if new skb allocation
597 fails. */
598 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 599 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 600 if (likely(new_data))
d46d132c 601 skb = build_skb(data, fp->rx_frag_size);
9f6c9258 602
e52fcb24 603 if (likely(skb)) {
9f6c9258 604#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 605 if (pad + len > fp->rx_buf_size) {
51c1a580 606 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 607 pad, len, fp->rx_buf_size);
9f6c9258
DK
608 bnx2x_panic();
609 return;
610 }
611#endif
612
e52fcb24 613 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 614 skb_put(skb, len);
e52fcb24 615 skb->rxhash = tpa_info->rxhash;
a334b5fb 616 skb->l4_rxhash = tpa_info->l4_rxhash;
9f6c9258
DK
617
618 skb->protocol = eth_type_trans(skb, bp->dev);
619 skb->ip_summed = CHECKSUM_UNNECESSARY;
620
621b4d66
DK
621 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
622 skb, cqe, cqe_idx)) {
619c5cb6
VZ
623 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
624 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
9bcc0893 625 napi_gro_receive(&fp->napi, skb);
9f6c9258 626 } else {
51c1a580
MS
627 DP(NETIF_MSG_RX_STATUS,
628 "Failed to allocate new pages - dropping packet!\n");
40955532 629 dev_kfree_skb_any(skb);
9f6c9258
DK
630 }
631
632
e52fcb24
ED
633 /* put new data in bin */
634 rx_buf->data = new_data;
9f6c9258 635
619c5cb6 636 return;
9f6c9258 637 }
d46d132c 638 bnx2x_frag_free(fp, new_data);
619c5cb6
VZ
639drop:
640 /* drop the packet and keep the buffer in the bin */
641 DP(NETIF_MSG_RX_STATUS,
642 "Failed to allocate or map a new skb - dropping packet!\n");
15192a8c 643 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
644}
645
1191cb83
ED
646static int bnx2x_alloc_rx_data(struct bnx2x *bp,
647 struct bnx2x_fastpath *fp, u16 index)
648{
649 u8 *data;
650 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
651 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
652 dma_addr_t mapping;
653
d46d132c 654 data = bnx2x_frag_alloc(fp);
1191cb83
ED
655 if (unlikely(data == NULL))
656 return -ENOMEM;
657
658 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
659 fp->rx_buf_size,
660 DMA_FROM_DEVICE);
661 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
d46d132c 662 bnx2x_frag_free(fp, data);
1191cb83
ED
663 BNX2X_ERR("Can't map rx data\n");
664 return -ENOMEM;
665 }
666
667 rx_buf->data = data;
668 dma_unmap_addr_set(rx_buf, mapping, mapping);
669
670 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
671 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
672
673 return 0;
674}
675
15192a8c
BW
676static
677void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
678 struct bnx2x_fastpath *fp,
679 struct bnx2x_eth_q_stats *qstats)
d6cb3e41 680{
e488921f
MS
681 /* Do nothing if no L4 csum validation was done.
682 * We do not check whether IP csum was validated. For IPv4 we assume
683 * that if the card got as far as validating the L4 csum, it also
684 * validated the IP csum. IPv6 has no IP csum.
685 */
d6cb3e41 686 if (cqe->fast_path_cqe.status_flags &
e488921f 687 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
d6cb3e41
ED
688 return;
689
e488921f 690 /* If L4 validation was done, check if an error was found. */
d6cb3e41
ED
691
692 if (cqe->fast_path_cqe.type_error_flags &
693 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
694 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
15192a8c 695 qstats->hw_csum_err++;
d6cb3e41
ED
696 else
697 skb->ip_summed = CHECKSUM_UNNECESSARY;
698}
9f6c9258
DK
699
700int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
701{
702 struct bnx2x *bp = fp->bp;
703 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
704 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
705 int rx_pkt = 0;
706
707#ifdef BNX2X_STOP_ON_ERROR
708 if (unlikely(bp->panic))
709 return 0;
710#endif
711
712 /* CQ "next element" is of the size of the regular element,
713 that's why it's ok here */
714 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
715 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
716 hw_comp_cons++;
717
718 bd_cons = fp->rx_bd_cons;
719 bd_prod = fp->rx_bd_prod;
720 bd_prod_fw = bd_prod;
721 sw_comp_cons = fp->rx_comp_cons;
722 sw_comp_prod = fp->rx_comp_prod;
723
724 /* Memory barrier necessary as speculative reads of the rx
725 * buffer can be ahead of the index in the status block
726 */
727 rmb();
728
729 DP(NETIF_MSG_RX_STATUS,
730 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
731 fp->index, hw_comp_cons, sw_comp_cons);
732
733 while (sw_comp_cons != hw_comp_cons) {
734 struct sw_rx_bd *rx_buf = NULL;
735 struct sk_buff *skb;
736 union eth_rx_cqe *cqe;
619c5cb6 737 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258 738 u8 cqe_fp_flags;
619c5cb6 739 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 740 u16 len, pad, queue;
e52fcb24 741 u8 *data;
a334b5fb 742 bool l4_rxhash;
9f6c9258 743
619c5cb6
VZ
744#ifdef BNX2X_STOP_ON_ERROR
745 if (unlikely(bp->panic))
746 return 0;
747#endif
748
9f6c9258
DK
749 comp_ring_cons = RCQ_BD(sw_comp_cons);
750 bd_prod = RX_BD(bd_prod);
751 bd_cons = RX_BD(bd_cons);
752
9f6c9258 753 cqe = &fp->rx_comp_ring[comp_ring_cons];
619c5cb6
VZ
754 cqe_fp = &cqe->fast_path_cqe;
755 cqe_fp_flags = cqe_fp->type_error_flags;
756 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 757
51c1a580
MS
758 DP(NETIF_MSG_RX_STATUS,
759 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
760 CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
761 cqe_fp_flags, cqe_fp->status_flags,
762 le32_to_cpu(cqe_fp->rss_hash_result),
621b4d66
DK
763 le16_to_cpu(cqe_fp->vlan_tag),
764 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
9f6c9258
DK
765
766 /* is this a slowpath msg? */
619c5cb6 767 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
768 bnx2x_sp_event(fp, cqe);
769 goto next_cqe;
e52fcb24 770 }
621b4d66 771
e52fcb24
ED
772 rx_buf = &fp->rx_buf_ring[bd_cons];
773 data = rx_buf->data;
9f6c9258 774
e52fcb24 775 if (!CQE_TYPE_FAST(cqe_fp_type)) {
621b4d66
DK
776 struct bnx2x_agg_info *tpa_info;
777 u16 frag_size, pages;
619c5cb6 778#ifdef BNX2X_STOP_ON_ERROR
e52fcb24
ED
779 /* sanity check */
780 if (fp->disable_tpa &&
781 (CQE_TYPE_START(cqe_fp_type) ||
782 CQE_TYPE_STOP(cqe_fp_type)))
51c1a580 783 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
e52fcb24 784 CQE_TYPE(cqe_fp_type));
619c5cb6 785#endif
9f6c9258 786
e52fcb24
ED
787 if (CQE_TYPE_START(cqe_fp_type)) {
788 u16 queue = cqe_fp->queue_index;
789 DP(NETIF_MSG_RX_STATUS,
790 "calling tpa_start on queue %d\n",
791 queue);
9f6c9258 792
e52fcb24
ED
793 bnx2x_tpa_start(fp, queue,
794 bd_cons, bd_prod,
795 cqe_fp);
621b4d66 796
e52fcb24 797 goto next_rx;
e52fcb24 798
621b4d66
DK
799 }
800 queue = cqe->end_agg_cqe.queue_index;
801 tpa_info = &fp->tpa_info[queue];
802 DP(NETIF_MSG_RX_STATUS,
803 "calling tpa_stop on queue %d\n",
804 queue);
805
806 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
807 tpa_info->len_on_bd;
808
809 if (fp->mode == TPA_MODE_GRO)
810 pages = (frag_size + tpa_info->full_page - 1) /
811 tpa_info->full_page;
812 else
813 pages = SGE_PAGE_ALIGN(frag_size) >>
814 SGE_PAGE_SHIFT;
815
816 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
817 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 818#ifdef BNX2X_STOP_ON_ERROR
621b4d66
DK
819 if (bp->panic)
820 return 0;
9f6c9258
DK
821#endif
822
621b4d66
DK
823 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
824 goto next_cqe;
e52fcb24
ED
825 }
826 /* non TPA */
621b4d66 827 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
e52fcb24
ED
828 pad = cqe_fp->placement_offset;
829 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 830 dma_unmap_addr(rx_buf, mapping),
e52fcb24
ED
831 pad + RX_COPY_THRESH,
832 DMA_FROM_DEVICE);
833 pad += NET_SKB_PAD;
834 prefetch(data + pad); /* speedup eth_type_trans() */
835 /* is this an error packet? */
836 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 837 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
838 "ERROR flags %x rx packet %u\n",
839 cqe_fp_flags, sw_comp_cons);
15192a8c 840 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
e52fcb24
ED
841 goto reuse_rx;
842 }
9f6c9258 843
e52fcb24
ED
844 /* Since we don't have a jumbo ring
845 * copy small packets if mtu > 1500
846 */
847 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
848 (len <= RX_COPY_THRESH)) {
849 skb = netdev_alloc_skb_ip_align(bp->dev, len);
850 if (skb == NULL) {
51c1a580 851 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24 852 "ERROR packet dropped because of alloc failure\n");
15192a8c 853 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258
DK
854 goto reuse_rx;
855 }
e52fcb24
ED
856 memcpy(skb->data, data + pad, len);
857 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
858 } else {
859 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
9f6c9258 860 dma_unmap_single(&bp->pdev->dev,
e52fcb24 861 dma_unmap_addr(rx_buf, mapping),
a8c94b91 862 fp->rx_buf_size,
9f6c9258 863 DMA_FROM_DEVICE);
d46d132c 864 skb = build_skb(data, fp->rx_frag_size);
e52fcb24 865 if (unlikely(!skb)) {
d46d132c 866 bnx2x_frag_free(fp, data);
15192a8c
BW
867 bnx2x_fp_qstats(bp, fp)->
868 rx_skb_alloc_failed++;
e52fcb24
ED
869 goto next_rx;
870 }
9f6c9258 871 skb_reserve(skb, pad);
9f6c9258 872 } else {
51c1a580
MS
873 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
874 "ERROR packet dropped because of alloc failure\n");
15192a8c 875 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
9f6c9258 876reuse_rx:
e52fcb24 877 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
9f6c9258
DK
878 goto next_rx;
879 }
036d2df9 880 }
9f6c9258 881
036d2df9
DK
882 skb_put(skb, len);
883 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 884
036d2df9 885 /* Set Toeplitz hash for a non-LRO skb */
a334b5fb
ED
886 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
887 skb->l4_rxhash = l4_rxhash;
9f6c9258 888
036d2df9 889 skb_checksum_none_assert(skb);
f85582f8 890
d6cb3e41 891 if (bp->dev->features & NETIF_F_RXCSUM)
15192a8c
BW
892 bnx2x_csum_validate(skb, cqe, fp,
893 bnx2x_fp_qstats(bp, fp));
9f6c9258 894
f233cafe 895 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 896
619c5cb6
VZ
897 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
898 PARSING_FLAGS_VLAN)
9bcc0893 899 __vlan_hwaccel_put_tag(skb,
619c5cb6 900 le16_to_cpu(cqe_fp->vlan_tag));
9bcc0893 901 napi_gro_receive(&fp->napi, skb);
9f6c9258
DK
902
903
904next_rx:
e52fcb24 905 rx_buf->data = NULL;
9f6c9258
DK
906
907 bd_cons = NEXT_RX_IDX(bd_cons);
908 bd_prod = NEXT_RX_IDX(bd_prod);
909 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
910 rx_pkt++;
911next_cqe:
912 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
913 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
914
915 if (rx_pkt == budget)
916 break;
917 } /* while */
918
919 fp->rx_bd_cons = bd_cons;
920 fp->rx_bd_prod = bd_prod_fw;
921 fp->rx_comp_cons = sw_comp_cons;
922 fp->rx_comp_prod = sw_comp_prod;
923
924 /* Update producers */
925 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
926 fp->rx_sge_prod);
927
928 fp->rx_pkt += rx_pkt;
929 fp->rx_calls++;
930
931 return rx_pkt;
932}
933
934static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
935{
936 struct bnx2x_fastpath *fp = fp_cookie;
937 struct bnx2x *bp = fp->bp;
6383c0b3 938 u8 cos;
9f6c9258 939
51c1a580
MS
940 DP(NETIF_MSG_INTR,
941 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3
DK
942 fp->index, fp->fw_sb_id, fp->igu_sb_id);
943 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
944
945#ifdef BNX2X_STOP_ON_ERROR
946 if (unlikely(bp->panic))
947 return IRQ_HANDLED;
948#endif
949
950 /* Handle Rx and Tx according to MSI-X vector */
951 prefetch(fp->rx_cons_sb);
6383c0b3
AE
952
953 for_each_cos_in_tx_queue(fp, cos)
65565884 954 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
6383c0b3 955
523224a3 956 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
957 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
958
959 return IRQ_HANDLED;
960}
961
9f6c9258
DK
962/* HW Lock for shared dual port PHYs */
963void bnx2x_acquire_phy_lock(struct bnx2x *bp)
964{
965 mutex_lock(&bp->port.phy_mutex);
966
8203c4b6 967 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
968}
969
970void bnx2x_release_phy_lock(struct bnx2x *bp)
971{
8203c4b6 972 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
9f6c9258
DK
973
974 mutex_unlock(&bp->port.phy_mutex);
975}
976
0793f83f
DK
977/* calculates MF speed according to current linespeed and MF configuration */
978u16 bnx2x_get_mf_speed(struct bnx2x *bp)
979{
980 u16 line_speed = bp->link_vars.line_speed;
981 if (IS_MF(bp)) {
faa6fcbb
DK
982 u16 maxCfg = bnx2x_extract_max_cfg(bp,
983 bp->mf_config[BP_VN(bp)]);
984
985 /* Calculate the current MAX line speed limit for the MF
986 * devices
0793f83f 987 */
faa6fcbb
DK
988 if (IS_MF_SI(bp))
989 line_speed = (line_speed * maxCfg) / 100;
990 else { /* SD mode */
0793f83f
DK
991 u16 vn_max_rate = maxCfg * 100;
992
993 if (vn_max_rate < line_speed)
994 line_speed = vn_max_rate;
faa6fcbb 995 }
0793f83f
DK
996 }
997
998 return line_speed;
999}
1000
2ae17f66
VZ
1001/**
1002 * bnx2x_fill_report_data - fill link report data to report
1003 *
1004 * @bp: driver handle
1005 * @data: link state to update
1006 *
1007 * It uses non-atomic bit operations because it is called under the mutex.
1008 */
1191cb83
ED
1009static void bnx2x_fill_report_data(struct bnx2x *bp,
1010 struct bnx2x_link_report_data *data)
2ae17f66
VZ
1011{
1012 u16 line_speed = bnx2x_get_mf_speed(bp);
1013
1014 memset(data, 0, sizeof(*data));
1015
1016 /* Fill the report data: effective line speed */
1017 data->line_speed = line_speed;
1018
1019 /* Link is down */
1020 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1021 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1022 &data->link_report_flags);
1023
1024 /* Full DUPLEX */
1025 if (bp->link_vars.duplex == DUPLEX_FULL)
1026 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
1027
1028 /* Rx Flow Control is ON */
1029 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1030 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
1031
1032 /* Tx Flow Control is ON */
1033 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1034 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
1035}
1036
1037/**
1038 * bnx2x_link_report - report link status to OS.
1039 *
1040 * @bp: driver handle
1041 *
1042 * Calls the __bnx2x_link_report() under the same locking scheme
1043 * as the link/PHY state managing code to ensure consistent link
1044 * reporting.
1045 */
1046
9f6c9258
DK
1047void bnx2x_link_report(struct bnx2x *bp)
1048{
2ae17f66
VZ
1049 bnx2x_acquire_phy_lock(bp);
1050 __bnx2x_link_report(bp);
1051 bnx2x_release_phy_lock(bp);
1052}
9f6c9258 1053
2ae17f66
VZ
1054/**
1055 * __bnx2x_link_report - report link status to OS.
1056 *
1057 * @bp: driver handle
1058 *
1059 * Non-atomic implementation.
1060 * Should be called under the phy_lock.
1061 */
1062void __bnx2x_link_report(struct bnx2x *bp)
1063{
1064 struct bnx2x_link_report_data cur_data;
9f6c9258 1065
2ae17f66 1066 /* reread mf_cfg */
ad5afc89 1067 if (IS_PF(bp) && !CHIP_IS_E1(bp))
2ae17f66
VZ
1068 bnx2x_read_mf_cfg(bp);
1069
1070 /* Read the current link report info */
1071 bnx2x_fill_report_data(bp, &cur_data);
1072
1073 /* Don't report link down or exactly the same link status twice */
1074 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1075 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1076 &bp->last_reported_link.link_report_flags) &&
1077 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1078 &cur_data.link_report_flags)))
1079 return;
1080
1081 bp->link_cnt++;
9f6c9258 1082
2ae17f66
VZ
1083 /* We are going to report new link parameters now -
1084 * remember the current data for the next time.
1085 */
1086 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 1087
2ae17f66
VZ
1088 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1089 &cur_data.link_report_flags)) {
1090 netif_carrier_off(bp->dev);
1091 netdev_err(bp->dev, "NIC Link is Down\n");
1092 return;
1093 } else {
94f05b0f
JP
1094 const char *duplex;
1095 const char *flow;
1096
2ae17f66 1097 netif_carrier_on(bp->dev);
9f6c9258 1098
2ae17f66
VZ
1099 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1100 &cur_data.link_report_flags))
94f05b0f 1101 duplex = "full";
9f6c9258 1102 else
94f05b0f 1103 duplex = "half";
9f6c9258 1104
2ae17f66
VZ
1105 /* Handle the FC at the end so that only these flags could
1106 * possibly be set. This way we may easily check if there is no FC
1107 * enabled.
1108 */
1109 if (cur_data.link_report_flags) {
1110 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1111 &cur_data.link_report_flags)) {
2ae17f66
VZ
1112 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1113 &cur_data.link_report_flags))
94f05b0f
JP
1114 flow = "ON - receive & transmit";
1115 else
1116 flow = "ON - receive";
9f6c9258 1117 } else {
94f05b0f 1118 flow = "ON - transmit";
9f6c9258 1119 }
94f05b0f
JP
1120 } else {
1121 flow = "none";
9f6c9258 1122 }
94f05b0f
JP
1123 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1124 cur_data.line_speed, duplex, flow);
9f6c9258
DK
1125 }
1126}
1127
1191cb83
ED
1128static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1129{
1130 int i;
1131
1132 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1133 struct eth_rx_sge *sge;
1134
1135 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1136 sge->addr_hi =
1137 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1138 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1139
1140 sge->addr_lo =
1141 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1142 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1143 }
1144}
1145
1146static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1147 struct bnx2x_fastpath *fp, int last)
1148{
1149 int i;
1150
1151 for (i = 0; i < last; i++) {
1152 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1153 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1154 u8 *data = first_buf->data;
1155
1156 if (data == NULL) {
1157 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1158 continue;
1159 }
1160 if (tpa_info->tpa_state == BNX2X_TPA_START)
1161 dma_unmap_single(&bp->pdev->dev,
1162 dma_unmap_addr(first_buf, mapping),
1163 fp->rx_buf_size, DMA_FROM_DEVICE);
d46d132c 1164 bnx2x_frag_free(fp, data);
1191cb83
ED
1165 first_buf->data = NULL;
1166 }
1167}
1168
55c11941
MS
1169void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1170{
1171 int j;
1172
1173 for_each_rx_queue_cnic(bp, j) {
1174 struct bnx2x_fastpath *fp = &bp->fp[j];
1175
1176 fp->rx_bd_cons = 0;
1177
1178 /* Activate BD ring */
1179 /* Warning!
1180 * this will generate an interrupt (to the TSTORM)
1181 * must only be done after chip is initialized
1182 */
1183 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1184 fp->rx_sge_prod);
1185 }
1186}
1187
9f6c9258
DK
1188void bnx2x_init_rx_rings(struct bnx2x *bp)
1189{
1190 int func = BP_FUNC(bp);
523224a3 1191 u16 ring_prod;
9f6c9258 1192 int i, j;
25141580 1193
b3b83c3f 1194 /* Allocate TPA resources */
55c11941 1195 for_each_eth_queue(bp, j) {
523224a3 1196 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1197
a8c94b91
VZ
1198 DP(NETIF_MSG_IFUP,
1199 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1200
523224a3 1201 if (!fp->disable_tpa) {
619c5cb6 1202 /* Fill the per-aggregation pool */
dfacf138 1203 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1204 struct bnx2x_agg_info *tpa_info =
1205 &fp->tpa_info[i];
1206 struct sw_rx_bd *first_buf =
1207 &tpa_info->first_buf;
1208
d46d132c 1209 first_buf->data = bnx2x_frag_alloc(fp);
e52fcb24 1210 if (!first_buf->data) {
51c1a580
MS
1211 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1212 j);
9f6c9258
DK
1213 bnx2x_free_tpa_pool(bp, fp, i);
1214 fp->disable_tpa = 1;
1215 break;
1216 }
619c5cb6
VZ
1217 dma_unmap_addr_set(first_buf, mapping, 0);
1218 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1219 }
523224a3
DK
1220
1221 /* "next page" elements initialization */
1222 bnx2x_set_next_page_sgl(fp);
1223
1224 /* set SGEs bit mask */
1225 bnx2x_init_sge_ring_bit_mask(fp);
1226
1227 /* Allocate SGEs and initialize the ring elements */
1228 for (i = 0, ring_prod = 0;
1229 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1230
1231 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
51c1a580
MS
1232 BNX2X_ERR("was only able to allocate %d rx sges\n",
1233 i);
1234 BNX2X_ERR("disabling TPA for queue[%d]\n",
1235 j);
523224a3 1236 /* Cleanup already allocated elements */
619c5cb6
VZ
1237 bnx2x_free_rx_sge_range(bp, fp,
1238 ring_prod);
1239 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1240 MAX_AGG_QS(bp));
523224a3
DK
1241 fp->disable_tpa = 1;
1242 ring_prod = 0;
1243 break;
1244 }
1245 ring_prod = NEXT_SGE_IDX(ring_prod);
1246 }
1247
1248 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1249 }
1250 }
1251
55c11941 1252 for_each_eth_queue(bp, j) {
9f6c9258
DK
1253 struct bnx2x_fastpath *fp = &bp->fp[j];
1254
1255 fp->rx_bd_cons = 0;
9f6c9258 1256
b3b83c3f
DK
1257 /* Activate BD ring */
1258 /* Warning!
1259 * this will generate an interrupt (to the TSTORM)
1260 * must only be done after chip is initialized
1261 */
1262 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1263 fp->rx_sge_prod);
9f6c9258 1264
9f6c9258
DK
1265 if (j != 0)
1266 continue;
1267
619c5cb6 1268 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1269 REG_WR(bp, BAR_USTRORM_INTMEM +
1270 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1271 U64_LO(fp->rx_comp_mapping));
1272 REG_WR(bp, BAR_USTRORM_INTMEM +
1273 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1274 U64_HI(fp->rx_comp_mapping));
1275 }
9f6c9258
DK
1276 }
1277}
f85582f8 1278
55c11941 1279static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
9f6c9258 1280{
6383c0b3 1281 u8 cos;
55c11941 1282 struct bnx2x *bp = fp->bp;
9f6c9258 1283
55c11941
MS
1284 for_each_cos_in_tx_queue(fp, cos) {
1285 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1286 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1287
55c11941
MS
1288 u16 sw_prod = txdata->tx_pkt_prod;
1289 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1290
55c11941
MS
1291 while (sw_cons != sw_prod) {
1292 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1293 &pkts_compl, &bytes_compl);
1294 sw_cons++;
9f6c9258 1295 }
55c11941
MS
1296
1297 netdev_tx_reset_queue(
1298 netdev_get_tx_queue(bp->dev,
1299 txdata->txq_index));
1300 }
1301}
1302
1303static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1304{
1305 int i;
1306
1307 for_each_tx_queue_cnic(bp, i) {
1308 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1309 }
1310}
1311
1312static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1313{
1314 int i;
1315
1316 for_each_eth_queue(bp, i) {
1317 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
9f6c9258
DK
1318 }
1319}
1320
b3b83c3f
DK
1321static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1322{
1323 struct bnx2x *bp = fp->bp;
1324 int i;
1325
1326 /* ring wasn't allocated */
1327 if (fp->rx_buf_ring == NULL)
1328 return;
1329
1330 for (i = 0; i < NUM_RX_BD; i++) {
1331 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1332 u8 *data = rx_buf->data;
b3b83c3f 1333
e52fcb24 1334 if (data == NULL)
b3b83c3f 1335 continue;
b3b83c3f
DK
1336 dma_unmap_single(&bp->pdev->dev,
1337 dma_unmap_addr(rx_buf, mapping),
1338 fp->rx_buf_size, DMA_FROM_DEVICE);
1339
e52fcb24 1340 rx_buf->data = NULL;
d46d132c 1341 bnx2x_frag_free(fp, data);
b3b83c3f
DK
1342 }
1343}
1344
55c11941
MS
1345static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1346{
1347 int j;
1348
1349 for_each_rx_queue_cnic(bp, j) {
1350 bnx2x_free_rx_bds(&bp->fp[j]);
1351 }
1352}
1353
9f6c9258
DK
1354static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1355{
b3b83c3f 1356 int j;
9f6c9258 1357
55c11941 1358 for_each_eth_queue(bp, j) {
9f6c9258
DK
1359 struct bnx2x_fastpath *fp = &bp->fp[j];
1360
b3b83c3f 1361 bnx2x_free_rx_bds(fp);
9f6c9258 1362
9f6c9258 1363 if (!fp->disable_tpa)
dfacf138 1364 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1365 }
1366}
1367
55c11941
MS
1368void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1369{
1370 bnx2x_free_tx_skbs_cnic(bp);
1371 bnx2x_free_rx_skbs_cnic(bp);
1372}
1373
9f6c9258
DK
1374void bnx2x_free_skbs(struct bnx2x *bp)
1375{
1376 bnx2x_free_tx_skbs(bp);
1377 bnx2x_free_rx_skbs(bp);
1378}
1379
e3835b99
DK
1380void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1381{
1382 /* load old values */
1383 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1384
1385 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1386 /* leave all but MAX value */
1387 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1388
1389 /* set new MAX value */
1390 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1391 & FUNC_MF_CFG_MAX_BW_MASK;
1392
1393 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1394 }
1395}
1396
ca92429f
DK
1397/**
1398 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1399 *
1400 * @bp: driver handle
1401 * @nvecs: number of vectors to be released
1402 */
1403static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1404{
ca92429f 1405 int i, offset = 0;
9f6c9258 1406
ca92429f
DK
1407 if (nvecs == offset)
1408 return;
ad5afc89
AE
1409
1410 /* VFs don't have a default SB */
1411 if (IS_PF(bp)) {
1412 free_irq(bp->msix_table[offset].vector, bp->dev);
1413 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1414 bp->msix_table[offset].vector);
1415 offset++;
1416 }
55c11941
MS
1417
1418 if (CNIC_SUPPORT(bp)) {
1419 if (nvecs == offset)
1420 return;
1421 offset++;
1422 }
ca92429f 1423
ec6ba945 1424 for_each_eth_queue(bp, i) {
ca92429f
DK
1425 if (nvecs == offset)
1426 return;
51c1a580
MS
1427 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1428 i, bp->msix_table[offset].vector);
9f6c9258 1429
ca92429f 1430 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1431 }
1432}
1433
d6214d7a 1434void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1435{
30a5de77 1436 if (bp->flags & USING_MSIX_FLAG &&
ad5afc89
AE
1437 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1438 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1439
1440 /* vfs don't have a default status block */
1441 if (IS_PF(bp))
1442 nvecs++;
1443
1444 bnx2x_free_msix_irqs(bp, nvecs);
1445 } else {
30a5de77 1446 free_irq(bp->dev->irq, bp->dev);
ad5afc89 1447 }
9f6c9258
DK
1448}
1449
0e8d2ec5 1450int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1451{
1ab4434c 1452 int msix_vec = 0, i, rc;
9f6c9258 1453
1ab4434c
AE
1454 /* VFs don't have a default status block */
1455 if (IS_PF(bp)) {
1456 bp->msix_table[msix_vec].entry = msix_vec;
1457 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1458 bp->msix_table[0].entry);
1459 msix_vec++;
1460 }
9f6c9258 1461
55c11941
MS
1462 /* Cnic requires an msix vector for itself */
1463 if (CNIC_SUPPORT(bp)) {
1464 bp->msix_table[msix_vec].entry = msix_vec;
1465 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1466 msix_vec, bp->msix_table[msix_vec].entry);
1467 msix_vec++;
1468 }
1469
6383c0b3 1470 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1471 for_each_eth_queue(bp, i) {
d6214d7a 1472 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1473 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1474 msix_vec, msix_vec, i);
d6214d7a 1475 msix_vec++;
9f6c9258
DK
1476 }
1477
1ab4434c
AE
1478 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1479 msix_vec);
d6214d7a 1480
1ab4434c 1481 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
9f6c9258
DK
1482
1483 /*
1484 * reconfigure number of tx/rx queues according to available
1485 * MSI-X vectors
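 *
 * Example: if msix_vec vectors were requested but pci_enable_msix()
 * reported that only rc of them are available (rc > 0), the code below
 * retries with rc vectors and shrinks the ETH queue count by
 * diff = msix_vec - rc.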
1486 */
55c11941 1487 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
d6214d7a 1488 /* how many fewer vectors will we have? */
1ab4434c 1489 int diff = msix_vec - rc;
9f6c9258 1490
51c1a580 1491 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
9f6c9258
DK
1492
1493 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1494
1495 if (rc) {
30a5de77
DK
1496 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1497 goto no_msix;
9f6c9258 1498 }
d6214d7a
DK
1499 /*
1500 * decrease number of queues by number of unallocated entries
1501 */
55c11941
MS
1502 bp->num_ethernet_queues -= diff;
1503 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
9f6c9258 1504
51c1a580 1505 BNX2X_DEV_INFO("New queue configuration set: %d\n",
30a5de77
DK
1506 bp->num_queues);
1507 } else if (rc > 0) {
1508 /* Get by with single vector */
1509 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1510 if (rc) {
1511 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1512 rc);
1513 goto no_msix;
1514 }
1515
1516 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1517 bp->flags |= USING_SINGLE_MSIX_FLAG;
1518
55c11941
MS
1519 BNX2X_DEV_INFO("set number of queues to 1\n");
1520 bp->num_ethernet_queues = 1;
1521 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
30a5de77 1522 } else if (rc < 0) {
51c1a580 1523 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
30a5de77 1524 goto no_msix;
9f6c9258
DK
1525 }
1526
1527 bp->flags |= USING_MSIX_FLAG;
1528
1529 return 0;
30a5de77
DK
1530
1531no_msix:
1532 /* fall back to INTx if not enough memory */
1533 if (rc == -ENOMEM)
1534 bp->flags |= DISABLE_MSI_FLAG;
1535
1536 return rc;
9f6c9258
DK
1537}
1538
1539static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1540{
ca92429f 1541 int i, rc, offset = 0;
9f6c9258 1542
ad5afc89
AE
1543 /* no default status block for vf */
1544 if (IS_PF(bp)) {
1545 rc = request_irq(bp->msix_table[offset++].vector,
1546 bnx2x_msix_sp_int, 0,
1547 bp->dev->name, bp->dev);
1548 if (rc) {
1549 BNX2X_ERR("request sp irq failed\n");
1550 return -EBUSY;
1551 }
9f6c9258
DK
1552 }
1553
55c11941
MS
1554 if (CNIC_SUPPORT(bp))
1555 offset++;
1556
ec6ba945 1557 for_each_eth_queue(bp, i) {
9f6c9258
DK
1558 struct bnx2x_fastpath *fp = &bp->fp[i];
1559 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1560 bp->dev->name, i);
1561
d6214d7a 1562 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1563 bnx2x_msix_fp_int, 0, fp->name, fp);
1564 if (rc) {
ca92429f
DK
1565 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1566 bp->msix_table[offset].vector, rc);
1567 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1568 return -EBUSY;
1569 }
1570
d6214d7a 1571 offset++;
9f6c9258
DK
1572 }
1573
ec6ba945 1574 i = BNX2X_NUM_ETH_QUEUES(bp);
ad5afc89
AE
1575 if (IS_PF(bp)) {
1576 offset = 1 + CNIC_SUPPORT(bp);
1577 netdev_info(bp->dev,
1578 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1579 bp->msix_table[0].vector,
1580 0, bp->msix_table[offset].vector,
1581 i - 1, bp->msix_table[offset + i - 1].vector);
1582 } else {
1583 offset = CNIC_SUPPORT(bp);
1584 netdev_info(bp->dev,
1585 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1586 0, bp->msix_table[offset].vector,
1587 i - 1, bp->msix_table[offset + i - 1].vector);
1588 }
9f6c9258
DK
1589 return 0;
1590}
1591
d6214d7a 1592int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1593{
1594 int rc;
1595
1596 rc = pci_enable_msi(bp->pdev);
1597 if (rc) {
51c1a580 1598 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1599 return -1;
1600 }
1601 bp->flags |= USING_MSI_FLAG;
1602
1603 return 0;
1604}
1605
1606static int bnx2x_req_irq(struct bnx2x *bp)
1607{
1608 unsigned long flags;
30a5de77 1609 unsigned int irq;
9f6c9258 1610
30a5de77 1611 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
9f6c9258
DK
1612 flags = 0;
1613 else
1614 flags = IRQF_SHARED;
1615
30a5de77
DK
1616 if (bp->flags & USING_MSIX_FLAG)
1617 irq = bp->msix_table[0].vector;
1618 else
1619 irq = bp->pdev->irq;
1620
1621 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
9f6c9258
DK
1622}
1623
1191cb83 1624static int bnx2x_setup_irqs(struct bnx2x *bp)
619c5cb6
VZ
1625{
1626 int rc = 0;
30a5de77
DK
1627 if (bp->flags & USING_MSIX_FLAG &&
1628 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
619c5cb6
VZ
1629 rc = bnx2x_req_msix_irqs(bp);
1630 if (rc)
1631 return rc;
1632 } else {
1633 bnx2x_ack_int(bp);
1634 rc = bnx2x_req_irq(bp);
1635 if (rc) {
1636 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1637 return rc;
1638 }
1639 if (bp->flags & USING_MSI_FLAG) {
1640 bp->dev->irq = bp->pdev->irq;
30a5de77
DK
1641 netdev_info(bp->dev, "using MSI IRQ %d\n",
1642 bp->dev->irq);
1643 }
1644 if (bp->flags & USING_MSIX_FLAG) {
1645 bp->dev->irq = bp->msix_table[0].vector;
1646 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1647 bp->dev->irq);
619c5cb6
VZ
1648 }
1649 }
1650
1651 return 0;
1652}
1653
55c11941
MS
1654static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1655{
1656 int i;
1657
1658 for_each_rx_queue_cnic(bp, i)
1659 napi_enable(&bnx2x_fp(bp, i, napi));
1660}
1661
1191cb83 1662static void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1663{
1664 int i;
1665
55c11941 1666 for_each_eth_queue(bp, i)
9f6c9258
DK
1667 napi_enable(&bnx2x_fp(bp, i, napi));
1668}
1669
55c11941
MS
1670static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1671{
1672 int i;
1673
1674 for_each_rx_queue_cnic(bp, i)
1675 napi_disable(&bnx2x_fp(bp, i, napi));
1676}
1677
1191cb83 1678static void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1679{
1680 int i;
1681
55c11941 1682 for_each_eth_queue(bp, i)
9f6c9258
DK
1683 napi_disable(&bnx2x_fp(bp, i, napi));
1684}
1685
1686void bnx2x_netif_start(struct bnx2x *bp)
1687{
4b7ed897
DK
1688 if (netif_running(bp->dev)) {
1689 bnx2x_napi_enable(bp);
55c11941
MS
1690 if (CNIC_LOADED(bp))
1691 bnx2x_napi_enable_cnic(bp);
4b7ed897
DK
1692 bnx2x_int_enable(bp);
1693 if (bp->state == BNX2X_STATE_OPEN)
1694 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1695 }
1696}
1697
1698void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1699{
1700 bnx2x_int_disable_sync(bp, disable_hw);
1701 bnx2x_napi_disable(bp);
55c11941
MS
1702 if (CNIC_LOADED(bp))
1703 bnx2x_napi_disable_cnic(bp);
9f6c9258 1704}
9f6c9258 1705
8307fa3e
VZ
1706u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1707{
8307fa3e 1708 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1709
55c11941 1710 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
8307fa3e
VZ
1711 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1712 u16 ether_type = ntohs(hdr->h_proto);
1713
1714 /* Skip VLAN tag if present */
1715 if (ether_type == ETH_P_8021Q) {
1716 struct vlan_ethhdr *vhdr =
1717 (struct vlan_ethhdr *)skb->data;
1718
1719 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1720 }
1721
1722 /* If ethertype is FCoE or FIP - use FCoE ring */
1723 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1724 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e 1725 }
55c11941 1726
cdb9d6ae 1727 /* select a non-FCoE queue */
6383c0b3 1728 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
8307fa3e
VZ
1729}
1730
96305234 1731
d6214d7a
DK
1732void bnx2x_set_num_queues(struct bnx2x *bp)
1733{
96305234 1734 /* RSS queues */
55c11941 1735 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
ec6ba945 1736
a3348722
BW
1737 /* override in STORAGE SD modes */
1738 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
55c11941
MS
1739 bp->num_ethernet_queues = 1;
1740
ec6ba945 1741 /* Add special queues */
55c11941
MS
1742 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1743 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
65565884
MS
1744
1745 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
ec6ba945
VZ
1746}
1747
cdb9d6ae
VZ
1748/**
1749 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1750 *
1751 * @bp: Driver handle
1752 *
1753 * We currently support at most 16 Tx queues for each CoS, thus we will
1754 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1755 * bp->max_cos.
1756 *
1757 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1758 * index after all ETH L2 indices.
1759 *
1760 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1761 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1762 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1763 *
1764 * The proper configuration of skb->queue_mapping is handled by
1765 * bnx2x_select_queue() and __skb_tx_hash().
1766 *
1767 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1768 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
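 *
 * Example (illustrative): with 8 ETH rings and bp->max_cos == 2, this
 * function requests tx = 16 and rx = 8 real queues; when the CNIC/FCoE
 * queue is included, both counts grow by one.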
1769 */
55c11941 1770static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
ec6ba945 1771{
6383c0b3 1772 int rc, tx, rx;
ec6ba945 1773
65565884 1774 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
55c11941 1775 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1776
6383c0b3 1777/* account for fcoe queue */
55c11941
MS
1778 if (include_cnic && !NO_FCOE(bp)) {
1779 rx++;
1780 tx++;
6383c0b3 1781 }
6383c0b3
AE
1782
1783 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1784 if (rc) {
1785 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1786 return rc;
1787 }
1788 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1789 if (rc) {
1790 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1791 return rc;
1792 }
1793
51c1a580 1794 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1795 tx, rx);
1796
ec6ba945
VZ
1797 return rc;
1798}
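
/* Editorial sketch (not part of the driver): the (tx, rx) pair passed to
 * netif_set_real_num_{tx,rx}_queues() above is "ETH queues times max_cos"
 * for Tx and "ETH queues" for Rx, each bumped by one when the FCoE L2 ring
 * is included. Hypothetical helpers restating that arithmetic:
 */
static inline int bnx2x_example_real_tx_queues(int eth_queues, int max_cos,
					       int include_fcoe)
{
	return eth_queues * max_cos + (include_fcoe ? 1 : 0);
}

static inline int bnx2x_example_real_rx_queues(int eth_queues,
					       int include_fcoe)
{
	return eth_queues + (include_fcoe ? 1 : 0);
}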
1799
1191cb83 1800static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
a8c94b91
VZ
1801{
1802 int i;
1803
1804 for_each_queue(bp, i) {
1805 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1806 u32 mtu;
a8c94b91
VZ
1807
1808 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1809 if (IS_FCOE_IDX(i))
1810 /*
1811 * Although there are no IP frames expected to arrive on
1812 * this ring we still want to add an
1813 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1814 * overrun attack.
1815 */
e52fcb24 1816 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1817 else
e52fcb24
ED
1818 mtu = bp->dev->mtu;
1819 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1820 IP_HEADER_ALIGNMENT_PADDING +
1821 ETH_OVREHEAD +
1822 mtu +
1823 BNX2X_FW_RX_ALIGN_END;
1824 /* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
d46d132c
ED
1825 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1826 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1827 else
1828 fp->rx_frag_size = 0;
a8c94b91
VZ
1829 }
1830}
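
/* Editorial sketch (not part of the driver): the Rx buffer size computed
 * above is "alignment start + IP alignment padding + Ethernet overhead
 * (ETH_OVREHEAD) + MTU + alignment end", and page-backed frags are only used
 * while that size plus NET_SKB_PAD still fits in one page. Hypothetical
 * helpers with the driver constants passed in as parameters:
 */
static inline unsigned int
bnx2x_example_rx_buf_size(unsigned int mtu, unsigned int align_start,
			  unsigned int ip_align_pad, unsigned int eth_overhead,
			  unsigned int align_end)
{
	return align_start + ip_align_pad + eth_overhead + mtu + align_end;
}

static inline unsigned int
bnx2x_example_rx_frag_size(unsigned int rx_buf_size, unsigned int skb_pad,
			   unsigned int page_size)
{
	/* 0 means "fall back to regular kmalloc'ed buffers" */
	return (rx_buf_size + skb_pad <= page_size) ?
	       rx_buf_size + skb_pad : 0;
}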
1831
1191cb83 1832static int bnx2x_init_rss_pf(struct bnx2x *bp)
619c5cb6
VZ
1833{
1834 int i;
619c5cb6
VZ
1835 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1836
96305234 1837 /* Prepare the initial contents of the indirection table if RSS is
619c5cb6
VZ
1838 * enabled
1839 */
5d317c6a
MS
1840 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
1841 bp->rss_conf_obj.ind_table[i] =
96305234
DK
1842 bp->fp->cl_id +
1843 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1844
1845 /*
1846 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1847 * per-port, so if explicit configuration is needed, do it only
1848 * for a PMF.
1849 *
1850 * For 57712 and newer on the other hand it's a per-function
1851 * configuration.
1852 */
5d317c6a 1853 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
619c5cb6
VZ
1854}
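
/* Editorial sketch (not part of the driver): ethtool_rxfh_indir_default()
 * effectively spreads indirection-table entries round-robin over the Rx
 * rings, so every entry above ends up as "cl_id of queue (i % num queues)".
 * A hypothetical helper showing that default spread:
 */
static inline unsigned char
bnx2x_example_ind_table_entry(unsigned int i, unsigned char base_cl_id,
			      unsigned int num_eth_queues)
{
	/* round-robin: entry i targets queue (i % num_eth_queues) */
	return base_cl_id + (i % num_eth_queues);
}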
1855
96305234 1856int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
5d317c6a 1857 bool config_hash)
619c5cb6 1858{
3b603066 1859 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
1860
1861 /* Although RSS is meaningless when there is a single HW queue we
1862 * still need it enabled in order to have HW Rx hash generated.
1863 *
1864 * if (!is_eth_multi(bp))
1865 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1866 */
1867
96305234 1868 params.rss_obj = rss_obj;
619c5cb6
VZ
1869
1870 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1871
96305234 1872 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
619c5cb6 1873
96305234
DK
1874 /* RSS configuration */
1875 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1876 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1877 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1878 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
5d317c6a
MS
1879 if (rss_obj->udp_rss_v4)
1880 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
1881 if (rss_obj->udp_rss_v6)
1882 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
619c5cb6 1883
96305234
DK
1884 /* Hash bits */
1885 params.rss_result_mask = MULTI_MASK;
619c5cb6 1886
5d317c6a 1887 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
619c5cb6 1888
96305234
DK
1889 if (config_hash) {
1890 /* RSS keys */
8376d0bc 1891 prandom_bytes(params.rss_key, sizeof(params.rss_key));
96305234 1892 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
619c5cb6
VZ
1893 }
1894
1895 return bnx2x_config_rss(bp, &params);
1896}
1897
1191cb83 1898static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
619c5cb6 1899{
3b603066 1900 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
1901
1902 /* Prepare parameters for function state transitions */
1903 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1904
1905 func_params.f_obj = &bp->func_obj;
1906 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1907
1908 func_params.params.hw_init.load_phase = load_code;
1909
1910 return bnx2x_func_state_change(bp, &func_params);
1911}
1912
1913/*
1914 * Cleans the objects that have internal lists without sending
1915 * ramrods. Should be run when interrupts are disabled.
1916 */
1917static void bnx2x_squeeze_objects(struct bnx2x *bp)
1918{
1919 int rc;
1920 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 1921 struct bnx2x_mcast_ramrod_params rparam = {NULL};
15192a8c 1922 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6
VZ
1923
1924 /***************** Cleanup MACs' object first *************************/
1925
1926 /* Wait for completion of the requested commands */
1927 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1928 /* Perform a dry cleanup */
1929 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1930
1931 /* Clean ETH primary MAC */
1932 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
15192a8c 1933 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
619c5cb6
VZ
1934 &ramrod_flags);
1935 if (rc != 0)
1936 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1937
1938 /* Cleanup UC list */
1939 vlan_mac_flags = 0;
1940 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1941 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1942 &ramrod_flags);
1943 if (rc != 0)
1944 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1945
1946 /***************** Now clean mcast object *****************************/
1947 rparam.mcast_obj = &bp->mcast_obj;
1948 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1949
1950 /* Add a DEL command... */
1951 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1952 if (rc < 0)
51c1a580
MS
1953 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1954 rc);
619c5cb6
VZ
1955
1956 /* ...and wait until all pending commands are cleared */
1957 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1958 while (rc != 0) {
1959 if (rc < 0) {
1960 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1961 rc);
1962 return;
1963 }
1964
1965 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1966 }
1967}
1968
1969#ifndef BNX2X_STOP_ON_ERROR
1970#define LOAD_ERROR_EXIT(bp, label) \
1971 do { \
1972 (bp)->state = BNX2X_STATE_ERROR; \
1973 goto label; \
1974 } while (0)
55c11941
MS
1975
1976#define LOAD_ERROR_EXIT_CNIC(bp, label) \
1977 do { \
1978 bp->cnic_loaded = false; \
1979 goto label; \
1980 } while (0)
1981#else /*BNX2X_STOP_ON_ERROR*/
619c5cb6
VZ
1982#define LOAD_ERROR_EXIT(bp, label) \
1983 do { \
1984 (bp)->state = BNX2X_STATE_ERROR; \
1985 (bp)->panic = 1; \
1986 return -EBUSY; \
1987 } while (0)
55c11941
MS
1988#define LOAD_ERROR_EXIT_CNIC(bp, label) \
1989 do { \
1990 bp->cnic_loaded = false; \
1991 (bp)->panic = 1; \
1992 return -EBUSY; \
1993 } while (0)
1994#endif /*BNX2X_STOP_ON_ERROR*/
619c5cb6 1995
ad5afc89
AE
1996static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
1997{
1998 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
1999 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2000 return;
2001}
2002
2003static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
452427b0 2004{
ad5afc89
AE
2005 int num_groups;
2006 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
452427b0 2007
ad5afc89
AE
2008 /* number of queues for statistics is number of eth queues + FCoE */
2009 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
452427b0 2010
ad5afc89
AE
2011 /* Total number of FW statistics requests =
2012 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2013 * and fcoe l2 queue) stats + num of queues (which includes another 1
2014 * for fcoe l2 queue if applicable)
2015 */
2016 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
452427b0 2017
ad5afc89
AE
2018 /* Request is built from stats_query_header and an array of
2019 * stats_query_cmd_group each of which contains
2020 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2021 * configured in the stats_query_header.
2022 */
2023 num_groups =
2024 (((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
2025 (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ?
2026 1 : 0));
2027
2028 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, num_groups %d\n",
2029 bp->fw_stats_num, num_groups);
2030 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2031 num_groups * sizeof(struct stats_query_cmd_group);
2032
2033 /* Data for statistics requests + stats_counter
2034 * stats_counter holds per-STORM counters that are incremented
2035 * when STORM has finished with the current request.
2036 * memory for FCoE offloaded statistics are counted anyway,
2037 * even if they will not be sent.
2038 * VF stats are not accounted for here as the data of VF stats is stored
2039 * in memory allocated by the VF, not here.
2040 */
2041 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2042 sizeof(struct per_pf_stats) +
2043 sizeof(struct fcoe_statistics_params) +
2044 sizeof(struct per_queue_stats) * num_queue_stats +
2045 sizeof(struct stats_counter);
2046
2047 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
2048 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2049
2050 /* Set shortcuts */
2051 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2052 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2053 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2054 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2055 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2056 bp->fw_stats_req_sz;
2057
2058 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2059 U64_HI(bp->fw_stats_req_mapping),
2060 U64_LO(bp->fw_stats_req_mapping));
2061 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2062 U64_HI(bp->fw_stats_data_mapping),
2063 U64_LO(bp->fw_stats_data_mapping));
2064 return 0;
2065
2066alloc_mem_err:
2067 bnx2x_free_fw_stats_mem(bp);
2068 BNX2X_ERR("Can't allocate FW stats memory\n");
2069 return -ENOMEM;
2070}
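
/* Editorial sketch (not part of the driver): the num_groups computation
 * above is a plain round-up division of the request count by
 * STATS_QUERY_CMD_COUNT, and the single DMA block is then laid out as
 * [request area of fw_stats_req_sz bytes][data area of fw_stats_data_sz
 * bytes]. A hypothetical helper for the round-up step:
 */
static inline int bnx2x_example_stats_groups(int fw_stats_num,
					     int cmds_per_group)
{
	/* equivalent to DIV_ROUND_UP(fw_stats_num, cmds_per_group) */
	return (fw_stats_num + cmds_per_group - 1) / cmds_per_group;
}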
2071
2072/* send load request to mcp and analyze response */
2073static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2074{
2075 /* init fw_seq */
2076 bp->fw_seq =
2077 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2078 DRV_MSG_SEQ_NUMBER_MASK);
2079 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2080
2081 /* Get current FW pulse sequence */
2082 bp->fw_drv_pulse_wr_seq =
2083 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2084 DRV_PULSE_SEQ_MASK);
2085 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2086
2087 /* load request */
2088 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2089 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2090
2091 /* if mcp fails to respond we must abort */
2092 if (!(*load_code)) {
2093 BNX2X_ERR("MCP response failure, aborting\n");
2094 return -EBUSY;
2095 }
2096
2097 /* If mcp refused (e.g. other port is in diagnostic mode) we
2098 * must abort
2099 */
2100 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2101 BNX2X_ERR("MCP refused load request, aborting\n");
2102 return -EBUSY;
2103 }
2104 return 0;
2105}
2106
2107/* check whether another PF has already loaded FW to chip. In
2108 * virtualized environments a pf from another VM may have already
2109 * initialized the device including loading FW
2110 */
2111int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
2112{
2113 /* is another pf loaded on this engine? */
2114 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2115 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2116 /* build my FW version dword */
2117 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2118 (BCM_5710_FW_MINOR_VERSION << 8) +
2119 (BCM_5710_FW_REVISION_VERSION << 16) +
2120 (BCM_5710_FW_ENGINEERING_VERSION << 24);
2121
2122 /* read loaded FW from chip */
2123 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2124
2125 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2126 loaded_fw, my_fw);
2127
2128 /* abort nic load if version mismatch */
2129 if (my_fw != loaded_fw) {
2130 BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
452427b0 2131 loaded_fw, my_fw);
ad5afc89
AE
2132 return -EBUSY;
2133 }
2134 }
2135 return 0;
2136}
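
/* Editorial sketch (not part of the driver): the version compatibility test
 * above packs (major, minor, revision, engineering) one byte per field into
 * a single dword and compares it with the value read back from
 * XSEM_REG_PRAM. A hypothetical helper showing the packing:
 */
static inline unsigned int bnx2x_example_fw_version_dword(unsigned int major,
							  unsigned int minor,
							  unsigned int rev,
							  unsigned int eng)
{
	return major + (minor << 8) + (rev << 16) + (eng << 24);
}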
2137
2138/* returns the "mcp load_code" according to global load_count array */
2139static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2140{
2141 int path = BP_PATH(bp);
2142
2143 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2144 path, load_count[path][0], load_count[path][1],
2145 load_count[path][2]);
2146 load_count[path][0]++;
2147 load_count[path][1 + port]++;
2148 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2149 path, load_count[path][0], load_count[path][1],
2150 load_count[path][2]);
2151 if (load_count[path][0] == 1)
2152 return FW_MSG_CODE_DRV_LOAD_COMMON;
2153 else if (load_count[path][1 + port] == 1)
2154 return FW_MSG_CODE_DRV_LOAD_PORT;
2155 else
2156 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2157}
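
/* Editorial sketch (not part of the driver): without an MCP the driver
 * emulates the load_code decision itself - the first function on the path
 * does COMMON init, the first function on a port does PORT init, everyone
 * else does FUNCTION init. A hypothetical helper over the per-path counters
 * (index 0 = common, 1 = port0, 2 = port1), assuming they were already
 * incremented as above:
 */
static inline int bnx2x_example_no_mcp_load_kind(const int counts[3], int port)
{
	if (counts[0] == 1)
		return 0;		/* first on the path: COMMON */
	else if (counts[1 + port] == 1)
		return 1;		/* first on this port: PORT */
	else
		return 2;		/* otherwise: FUNCTION */
}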
2158
2159/* mark PMF if applicable */
2160static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2161{
2162 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2163 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2164 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2165 bp->port.pmf = 1;
2166 /* We need the barrier to ensure the ordering between the
2167 * writing to bp->port.pmf here and reading it from the
2168 * bnx2x_periodic_task().
2169 */
2170 smp_mb();
2171 } else {
2172 bp->port.pmf = 0;
452427b0
YM
2173 }
2174
ad5afc89
AE
2175 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2176}
2177
2178static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2179{
2180 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2181 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2182 (bp->common.shmem2_base)) {
2183 if (SHMEM2_HAS(bp, dcc_support))
2184 SHMEM2_WR(bp, dcc_support,
2185 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2186 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2187 if (SHMEM2_HAS(bp, afex_driver_support))
2188 SHMEM2_WR(bp, afex_driver_support,
2189 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2190 }
2191
2192 /* Set AFEX default VLAN tag to an invalid value */
2193 bp->afex_def_vlan_tag = -1;
452427b0
YM
2194}
2195
1191cb83
ED
2196/**
2197 * bnx2x_bz_fp - zero content of the fastpath structure.
2198 *
2199 * @bp: driver handle
2200 * @index: fastpath index to be zeroed
2201 *
2202 * Makes sure the contents of bp->fp[index].napi are kept
2203 * intact.
2204 */
2205static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2206{
2207 struct bnx2x_fastpath *fp = &bp->fp[index];
15192a8c
BW
2208 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
2209
65565884 2210 int cos;
1191cb83 2211 struct napi_struct orig_napi = fp->napi;
15192a8c 2212 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
1191cb83 2213 /* bzero bnx2x_fastpath contents */
15192a8c
BW
2214 if (bp->stats_init) {
2215 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1191cb83 2216 memset(fp, 0, sizeof(*fp));
15192a8c 2217 } else {
1191cb83
ED
2218 /* Keep Queue statistics */
2219 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
2220 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
2221
2222 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
2223 GFP_KERNEL);
2224 if (tmp_eth_q_stats)
15192a8c 2225 memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
1191cb83
ED
2226 sizeof(struct bnx2x_eth_q_stats));
2227
2228 tmp_eth_q_stats_old =
2229 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
2230 GFP_KERNEL);
2231 if (tmp_eth_q_stats_old)
15192a8c 2232 memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
1191cb83
ED
2233 sizeof(struct bnx2x_eth_q_stats_old));
2234
15192a8c 2235 memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
1191cb83
ED
2236 memset(fp, 0, sizeof(*fp));
2237
2238 if (tmp_eth_q_stats) {
15192a8c
BW
2239 memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
2240 sizeof(struct bnx2x_eth_q_stats));
1191cb83
ED
2241 kfree(tmp_eth_q_stats);
2242 }
2243
2244 if (tmp_eth_q_stats_old) {
15192a8c 2245 memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
1191cb83
ED
2246 sizeof(struct bnx2x_eth_q_stats_old));
2247 kfree(tmp_eth_q_stats_old);
2248 }
2249
2250 }
2251
2252 /* Restore the NAPI object as it has been already initialized */
2253 fp->napi = orig_napi;
15192a8c 2254 fp->tpa_info = orig_tpa_info;
1191cb83
ED
2255 fp->bp = bp;
2256 fp->index = index;
2257 if (IS_ETH_FP(fp))
2258 fp->max_cos = bp->max_cos;
2259 else
2260 /* Special queues support only one CoS */
2261 fp->max_cos = 1;
2262
65565884 2263 /* Init txdata pointers */
65565884
MS
2264 if (IS_FCOE_FP(fp))
2265 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
65565884
MS
2266 if (IS_ETH_FP(fp))
2267 for_each_cos_in_tx_queue(fp, cos)
2268 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2269 BNX2X_NUM_ETH_QUEUES(bp) + index];
2270
1191cb83
ED
2271 /*
2272 * set the tpa flag for each queue. The tpa flag determines the queue
2273 * minimal size so it must be set prior to queue memory allocation
2274 */
2275 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2276 (bp->flags & GRO_ENABLE_FLAG &&
2277 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2278 if (bp->flags & TPA_ENABLE_FLAG)
2279 fp->mode = TPA_MODE_LRO;
2280 else if (bp->flags & GRO_ENABLE_FLAG)
2281 fp->mode = TPA_MODE_GRO;
2282
1191cb83
ED
2283 /* We don't want TPA on an FCoE L2 ring */
2284 if (IS_FCOE_FP(fp))
2285 fp->disable_tpa = 1;
55c11941
MS
2286}
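
/* Editorial sketch (not part of the driver): the txdata_ptr wiring above
 * treats bp->bnx2x_txq as a flat array grouped by CoS, so the Tx ring for
 * (queue index, cos) lives at slot "cos * num_eth_queues + index", while the
 * FCoE ring takes its own slot past the ETH ones (FCOE_TXQ_IDX). A
 * hypothetical index helper:
 */
static inline int bnx2x_example_txq_slot(int cos, int num_eth_queues,
					 int queue_index)
{
	return cos * num_eth_queues + queue_index;
}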
2287
2288int bnx2x_load_cnic(struct bnx2x *bp)
2289{
2290 int i, rc, port = BP_PORT(bp);
2291
2292 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2293
2294 mutex_init(&bp->cnic_mutex);
2295
ad5afc89
AE
2296 if (IS_PF(bp)) {
2297 rc = bnx2x_alloc_mem_cnic(bp);
2298 if (rc) {
2299 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2300 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2301 }
55c11941
MS
2302 }
2303
2304 rc = bnx2x_alloc_fp_mem_cnic(bp);
2305 if (rc) {
2306 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2307 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2308 }
2309
2310 /* Update the number of queues with the cnic queues */
2311 rc = bnx2x_set_real_num_queues(bp, 1);
2312 if (rc) {
2313 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2314 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2315 }
2316
2317 /* Add all CNIC NAPI objects */
2318 bnx2x_add_all_napi_cnic(bp);
2319 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2320 bnx2x_napi_enable_cnic(bp);
2321
2322 rc = bnx2x_init_hw_func_cnic(bp);
2323 if (rc)
2324 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2325
2326 bnx2x_nic_init_cnic(bp);
2327
ad5afc89
AE
2328 if (IS_PF(bp)) {
2329 /* Enable Timer scan */
2330 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2331
2332 /* setup cnic queues */
2333 for_each_cnic_queue(bp, i) {
2334 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2335 if (rc) {
2336 BNX2X_ERR("Queue setup failed\n");
2337 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2338 }
55c11941
MS
2339 }
2340 }
2341
2342 /* Initialize Rx filter. */
2343 netif_addr_lock_bh(bp->dev);
2344 bnx2x_set_rx_mode(bp->dev);
2345 netif_addr_unlock_bh(bp->dev);
2346
2347 /* re-read iscsi info */
2348 bnx2x_get_iscsi_info(bp);
2349 bnx2x_setup_cnic_irq_info(bp);
2350 bnx2x_setup_cnic_info(bp);
2351 bp->cnic_loaded = true;
2352 if (bp->state == BNX2X_STATE_OPEN)
2353 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2354
2355
2356 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2357
2358 return 0;
2359
2360#ifndef BNX2X_STOP_ON_ERROR
2361load_error_cnic2:
2362 /* Disable Timer scan */
2363 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2364
2365load_error_cnic1:
2366 bnx2x_napi_disable_cnic(bp);
2367 /* Update the number of queues without the cnic queues */
2368 rc = bnx2x_set_real_num_queues(bp, 0);
2369 if (rc)
2370 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2371load_error_cnic0:
2372 BNX2X_ERR("CNIC-related load failed\n");
2373 bnx2x_free_fp_mem_cnic(bp);
2374 bnx2x_free_mem_cnic(bp);
2375 return rc;
2376#endif /* ! BNX2X_STOP_ON_ERROR */
1191cb83
ED
2377}
2378
2379
9f6c9258
DK
2380/* must be called with rtnl_lock */
2381int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2382{
619c5cb6 2383 int port = BP_PORT(bp);
ad5afc89 2384 int i, rc = 0, load_code = 0;
9f6c9258 2385
55c11941
MS
2386 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2387 DP(NETIF_MSG_IFUP,
2388 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2389
9f6c9258 2390#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
2391 if (unlikely(bp->panic)) {
2392 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 2393 return -EPERM;
51c1a580 2394 }
9f6c9258
DK
2395#endif
2396
2397 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2398
2ae17f66
VZ
2399 /* Set the initial link reported state to link down */
2400 bnx2x_acquire_phy_lock(bp);
2401 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2402 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2403 &bp->last_reported_link.link_report_flags);
2404 bnx2x_release_phy_lock(bp);
2405
ad5afc89
AE
2406 if (IS_PF(bp))
2407 /* must be called before memory allocation and HW init */
2408 bnx2x_ilt_set_info(bp);
523224a3 2409
6383c0b3
AE
2410 /*
2411 * Zero fastpath structures while preserving invariants that are set up
2412 * only once: the napi struct, fp index, max_cos and the bp pointer.
65565884 2413 * Also set fp->disable_tpa and txdata_ptr.
b3b83c3f 2414 */
51c1a580 2415 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
2416 for_each_queue(bp, i)
2417 bnx2x_bz_fp(bp, i);
55c11941
MS
2418 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2419 bp->num_cnic_queues) *
2420 sizeof(struct bnx2x_fp_txdata));
b3b83c3f 2421
55c11941 2422 bp->fcoe_init = false;
6383c0b3 2423
a8c94b91
VZ
2424 /* Set the receive queues buffer size */
2425 bnx2x_set_rx_buf_size(bp);
2426
ad5afc89
AE
2427 if (IS_PF(bp)) {
2428 rc = bnx2x_alloc_mem(bp);
2429 if (rc) {
2430 BNX2X_ERR("Unable to allocate bp memory\n");
2431 return rc;
2432 }
2433 }
2434
2435 /* Allocated memory for FW statistics */
2436 if (bnx2x_alloc_fw_stats_mem(bp))
2437 LOAD_ERROR_EXIT(bp, load_error0);
2438
2439 /* needs to be done after alloc mem, since it's self-adjusting to the amount
2440 * of memory available for RSS queues
2441 */
2442 rc = bnx2x_alloc_fp_mem(bp);
2443 if (rc) {
2444 BNX2X_ERR("Unable to allocate memory for fps\n");
2445 LOAD_ERROR_EXIT(bp, load_error0);
2446 }
d6214d7a 2447
8d9ac297
AE
2448 /* request pf to initialize status blocks */
2449 if (IS_VF(bp)) {
2450 rc = bnx2x_vfpf_init(bp);
2451 if (rc)
2452 LOAD_ERROR_EXIT(bp, load_error0);
2453 }
2454
b3b83c3f
DK
2455 /* As long as bnx2x_alloc_mem() may possibly update
2456 * bp->num_queues, bnx2x_set_real_num_queues() should always
55c11941 2457 * come after it. At this stage cnic queues are not counted.
b3b83c3f 2458 */
55c11941 2459 rc = bnx2x_set_real_num_queues(bp, 0);
d6214d7a 2460 if (rc) {
ec6ba945 2461 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 2462 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
2463 }
2464
6383c0b3
AE
2465 /* configure multi cos mappings in kernel.
2466 * this configuration may be overridden by a multi class queue discipline
2467 * or by a dcbx negotiation result.
2468 */
2469 bnx2x_setup_tc(bp->dev, bp->max_cos);
2470
26614ba5
MS
2471 /* Add all NAPI objects */
2472 bnx2x_add_all_napi(bp);
55c11941 2473 DP(NETIF_MSG_IFUP, "napi added\n");
9f6c9258
DK
2474 bnx2x_napi_enable(bp);
2475
ad5afc89
AE
2476 if (IS_PF(bp)) {
2477 /* set pf load just before approaching the MCP */
2478 bnx2x_set_pf_load(bp);
2479
2480 /* if mcp exists send load request and analyze response */
2481 if (!BP_NOMCP(bp)) {
2482 /* attempt to load pf */
2483 rc = bnx2x_nic_load_request(bp, &load_code);
2484 if (rc)
2485 LOAD_ERROR_EXIT(bp, load_error1);
2486
2487 /* what did mcp say? */
2488 rc = bnx2x_nic_load_analyze_req(bp, load_code);
2489 if (rc) {
2490 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
d1e2d966
AE
2491 LOAD_ERROR_EXIT(bp, load_error2);
2492 }
ad5afc89
AE
2493 } else {
2494 load_code = bnx2x_nic_load_no_mcp(bp, port);
d1e2d966 2495 }
9f6c9258 2496
ad5afc89
AE
2497 /* mark pmf if applicable */
2498 bnx2x_nic_load_pmf(bp, load_code);
9f6c9258 2499
ad5afc89
AE
2500 /* Init Function state controlling object */
2501 bnx2x__init_func_obj(bp);
6383c0b3 2502
ad5afc89
AE
2503 /* Initialize HW */
2504 rc = bnx2x_init_hw(bp, load_code);
2505 if (rc) {
2506 BNX2X_ERR("HW init failed, aborting\n");
2507 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2508 LOAD_ERROR_EXIT(bp, load_error2);
2509 }
9f6c9258
DK
2510 }
2511
d6214d7a
DK
2512 /* Connect to IRQs */
2513 rc = bnx2x_setup_irqs(bp);
523224a3 2514 if (rc) {
ad5afc89
AE
2515 BNX2X_ERR("setup irqs failed\n");
2516 if (IS_PF(bp))
2517 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 2518 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
2519 }
2520
9f6c9258
DK
2521 /* Setup NIC internals and enable interrupts */
2522 bnx2x_nic_init(bp, load_code);
2523
619c5cb6 2524 /* Init per-function objects */
ad5afc89
AE
2525 if (IS_PF(bp)) {
2526 bnx2x_init_bp_objs(bp);
619c5cb6 2527
a3348722 2528
ad5afc89
AE
2529 /* Set AFEX default VLAN tag to an invalid value */
2530 bp->afex_def_vlan_tag = -1;
2531 bnx2x_nic_load_afex_dcc(bp, load_code);
2532 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2533 rc = bnx2x_func_start(bp);
2534 if (rc) {
2535 BNX2X_ERR("Function start failed!\n");
2536 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258 2537
619c5cb6 2538 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258 2539 }
9f6c9258 2540
ad5afc89
AE
2541 /* Send LOAD_DONE command to MCP */
2542 if (!BP_NOMCP(bp)) {
2543 load_code = bnx2x_fw_command(bp,
2544 DRV_MSG_CODE_LOAD_DONE, 0);
2545 if (!load_code) {
2546 BNX2X_ERR("MCP response failure, aborting\n");
2547 rc = -EBUSY;
2548 LOAD_ERROR_EXIT(bp, load_error3);
2549 }
2550 }
9f6c9258 2551
ad5afc89
AE
2552 /* setup the leading queue */
2553 rc = bnx2x_setup_leading(bp);
51c1a580 2554 if (rc) {
ad5afc89 2555 BNX2X_ERR("Setup leading failed!\n");
55c11941 2556 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2557 }
523224a3 2558
ad5afc89
AE
2559 /* set up the rest of the queues */
2560 for_each_nondefault_eth_queue(bp, i) {
2561 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2562 if (rc) {
2563 BNX2X_ERR("Queue setup failed\n");
2564 LOAD_ERROR_EXIT(bp, load_error3);
2565 }
2566 }
2567
2568 /* setup rss */
2569 rc = bnx2x_init_rss_pf(bp);
2570 if (rc) {
2571 BNX2X_ERR("PF RSS init failed\n");
2572 LOAD_ERROR_EXIT(bp, load_error3);
2573 }
8d9ac297
AE
2574
2575 } else { /* vf */
2576 for_each_eth_queue(bp, i) {
2577 rc = bnx2x_vfpf_setup_q(bp, i);
2578 if (rc) {
2579 BNX2X_ERR("Queue setup failed\n");
2580 LOAD_ERROR_EXIT(bp, load_error3);
2581 }
2582 }
51c1a580 2583 }
619c5cb6 2584
523224a3
DK
2585 /* Now when Clients are configured we are ready to work */
2586 bp->state = BNX2X_STATE_OPEN;
2587
619c5cb6 2588 /* Configure a ucast MAC */
ad5afc89
AE
2589 if (IS_PF(bp))
2590 rc = bnx2x_set_eth_mac(bp, true);
8d9ac297
AE
2591 else /* vf */
2592 rc = bnx2x_vfpf_set_mac(bp);
51c1a580
MS
2593 if (rc) {
2594 BNX2X_ERR("Setting Ethernet MAC failed\n");
55c11941 2595 LOAD_ERROR_EXIT(bp, load_error3);
51c1a580 2596 }
6e30dd4e 2597
ad5afc89 2598 if (IS_PF(bp) && bp->pending_max) {
e3835b99
DK
2599 bnx2x_update_max_mf_config(bp, bp->pending_max);
2600 bp->pending_max = 0;
2601 }
2602
ad5afc89
AE
2603 if (bp->port.pmf) {
2604 rc = bnx2x_initial_phy_init(bp, load_mode);
2605 if (rc)
2606 LOAD_ERROR_EXIT(bp, load_error3);
2607 }
c63da990 2608 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
9f6c9258 2609
619c5cb6
VZ
2610 /* Start fast path */
2611
2612 /* Initialize Rx filter. */
2613 netif_addr_lock_bh(bp->dev);
6e30dd4e 2614 bnx2x_set_rx_mode(bp->dev);
619c5cb6 2615 netif_addr_unlock_bh(bp->dev);
6e30dd4e 2616
619c5cb6 2617 /* Start the Tx */
9f6c9258
DK
2618 switch (load_mode) {
2619 case LOAD_NORMAL:
523224a3
DK
2620 /* Tx queue should only be re-enabled */
2621 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
2622 break;
2623
2624 case LOAD_OPEN:
2625 netif_tx_start_all_queues(bp->dev);
523224a3 2626 smp_mb__after_clear_bit();
9f6c9258
DK
2627 break;
2628
2629 case LOAD_DIAG:
8970b2e4 2630 case LOAD_LOOPBACK_EXT:
9f6c9258
DK
2631 bp->state = BNX2X_STATE_DIAG;
2632 break;
2633
2634 default:
2635 break;
2636 }
2637
00253a8c 2638 if (bp->port.pmf)
4c704899 2639 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
00253a8c 2640 else
9f6c9258
DK
2641 bnx2x__link_status_update(bp);
2642
2643 /* start the timer */
2644 mod_timer(&bp->timer, jiffies + bp->current_interval);
2645
55c11941
MS
2646 if (CNIC_ENABLED(bp))
2647 bnx2x_load_cnic(bp);
9f6c9258 2648
ad5afc89
AE
2649 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2650 /* mark driver is loaded in shmem2 */
9ce392d4
YM
2651 u32 val;
2652 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2653 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2654 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2655 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2656 }
2657
619c5cb6 2658 /* Wait for all pending SP commands to complete */
ad5afc89 2659 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
619c5cb6 2660 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
5d07d868 2661 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
619c5cb6
VZ
2662 return -EBUSY;
2663 }
6891dd25 2664
9876879f
BW
2665 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2666 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2667 bnx2x_dcbx_init(bp, false);
2668
55c11941
MS
2669 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2670
9f6c9258
DK
2671 return 0;
2672
619c5cb6 2673#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2674load_error3:
ad5afc89
AE
2675 if (IS_PF(bp)) {
2676 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2677
ad5afc89
AE
2678 /* Clean queueable objects */
2679 bnx2x_squeeze_objects(bp);
2680 }
619c5cb6 2681
9f6c9258
DK
2682 /* Free SKBs, SGEs, TPA pool and driver internals */
2683 bnx2x_free_skbs(bp);
ec6ba945 2684 for_each_rx_queue(bp, i)
9f6c9258 2685 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2686
9f6c9258 2687 /* Release IRQs */
d6214d7a
DK
2688 bnx2x_free_irq(bp);
2689load_error2:
ad5afc89 2690 if (IS_PF(bp) && !BP_NOMCP(bp)) {
d6214d7a
DK
2691 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2692 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2693 }
2694
2695 bp->port.pmf = 0;
9f6c9258
DK
2696load_error1:
2697 bnx2x_napi_disable(bp);
ad5afc89 2698
889b9af3 2699 /* clear pf_load status, as it was already set */
ad5afc89
AE
2700 if (IS_PF(bp))
2701 bnx2x_clear_pf_load(bp);
d6214d7a 2702load_error0:
ad5afc89
AE
2703 bnx2x_free_fp_mem(bp);
2704 bnx2x_free_fw_stats_mem(bp);
9f6c9258
DK
2705 bnx2x_free_mem(bp);
2706
2707 return rc;
619c5cb6 2708#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2709}
2710
ad5afc89
AE
2711static int bnx2x_drain_tx_queues(struct bnx2x *bp)
2712{
2713 u8 rc = 0, cos, i;
2714
2715 /* Wait until tx fastpath tasks complete */
2716 for_each_tx_queue(bp, i) {
2717 struct bnx2x_fastpath *fp = &bp->fp[i];
2718
2719 for_each_cos_in_tx_queue(fp, cos)
2720 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2721 if (rc)
2722 return rc;
2723 }
2724 return 0;
2725}
2726
9f6c9258 2727/* must be called with rtnl_lock */
5d07d868 2728int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
9f6c9258
DK
2729{
2730 int i;
c9ee9206
VZ
2731 bool global = false;
2732
55c11941
MS
2733 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2734
9ce392d4 2735 /* mark driver is unloaded in shmem2 */
ad5afc89 2736 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
9ce392d4
YM
2737 u32 val;
2738 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2739 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2740 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2741 }
2742
ad5afc89
AE
2743 if (IS_PF(bp) &&
2744 (bp->state == BNX2X_STATE_CLOSED ||
2745 bp->state == BNX2X_STATE_ERROR)) {
c9ee9206
VZ
2746 /* We can get here if the driver has been unloaded
2747 * during parity error recovery and is either waiting for a
2748 * leader to complete or for other functions to unload and
2749 * then ifdown has been issued. In this case we want to
2750 * unload and let other functions to complete a recovery
2751 * process.
2752 */
9f6c9258
DK
2753 bp->recovery_state = BNX2X_RECOVERY_DONE;
2754 bp->is_leader = 0;
c9ee9206
VZ
2755 bnx2x_release_leader_lock(bp);
2756 smp_mb();
2757
51c1a580
MS
2758 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2759 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2760 return -EINVAL;
2761 }
2762
87b7ba3d
VZ
2763 /*
2764 * It's important to set the bp->state to the value different from
2765 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2766 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2767 */
2768 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2769 smp_mb();
2770
55c11941
MS
2771 if (CNIC_LOADED(bp))
2772 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2773
9505ee37
VZ
2774 /* Stop Tx */
2775 bnx2x_tx_disable(bp);
65565884 2776 netdev_reset_tc(bp->dev);
9505ee37 2777
9f6c9258 2778 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2779
9f6c9258 2780 del_timer_sync(&bp->timer);
f85582f8 2781
ad5afc89
AE
2782 if (IS_PF(bp)) {
2783 /* Set ALWAYS_ALIVE bit in shmem */
2784 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2785 bnx2x_drv_pulse(bp);
2786 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2787 bnx2x_save_statistics(bp);
2788 }
9f6c9258 2789
ad5afc89
AE
2790 /* wait till consumers catch up with producers in all queues */
2791 bnx2x_drain_tx_queues(bp);
9f6c9258 2792
9b176b6b
AE
2793 /* if VF, indicate to the PF that this function is going down (the PF will
2794 * delete sp elements and clear initializations)
2795 */
2796 if (IS_VF(bp))
2797 bnx2x_vfpf_close_vf(bp);
2798 else if (unload_mode != UNLOAD_RECOVERY)
2799 /* if this is a normal/close unload need to clean up chip*/
5d07d868 2800 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
523224a3 2801 else {
c9ee9206
VZ
2802 /* Send the UNLOAD_REQUEST to the MCP */
2803 bnx2x_send_unload_req(bp, unload_mode);
2804
2805 /*
2806 * Prevent transactions to host from the functions on the
2807 * engine that doesn't reset global blocks in case of global
2808 * attention, once global blocks are reset and gates are opened
2809 * (the engine whose leader will perform the recovery
2810 * last).
2811 */
2812 if (!CHIP_IS_E1x(bp))
2813 bnx2x_pf_disable(bp);
2814
2815 /* Disable HW interrupts, NAPI */
523224a3 2816 bnx2x_netif_stop(bp, 1);
26614ba5
MS
2817 /* Delete all NAPI objects */
2818 bnx2x_del_all_napi(bp);
55c11941
MS
2819 if (CNIC_LOADED(bp))
2820 bnx2x_del_all_napi_cnic(bp);
523224a3 2821 /* Release IRQs */
d6214d7a 2822 bnx2x_free_irq(bp);
c9ee9206
VZ
2823
2824 /* Report UNLOAD_DONE to MCP */
5d07d868 2825 bnx2x_send_unload_done(bp, false);
523224a3 2826 }
9f6c9258 2827
619c5cb6
VZ
2828 /*
2829 * At this stage no more interrupts will arrive, so we may safely clean
2830 * the queueable objects here in case they failed to get cleaned so far.
2831 */
ad5afc89
AE
2832 if (IS_PF(bp))
2833 bnx2x_squeeze_objects(bp);
619c5cb6 2834
79616895
VZ
2835 /* There should be no more pending SP commands at this stage */
2836 bp->sp_state = 0;
2837
9f6c9258
DK
2838 bp->port.pmf = 0;
2839
2840 /* Free SKBs, SGEs, TPA pool and driver internals */
2841 bnx2x_free_skbs(bp);
55c11941
MS
2842 if (CNIC_LOADED(bp))
2843 bnx2x_free_skbs_cnic(bp);
ec6ba945 2844 for_each_rx_queue(bp, i)
9f6c9258 2845 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2846
ad5afc89
AE
2847 bnx2x_free_fp_mem(bp);
2848 if (CNIC_LOADED(bp))
55c11941 2849 bnx2x_free_fp_mem_cnic(bp);
9f6c9258 2850
ad5afc89
AE
2851 if (IS_PF(bp)) {
2852 bnx2x_free_mem(bp);
2853 if (CNIC_LOADED(bp))
2854 bnx2x_free_mem_cnic(bp);
2855 }
9f6c9258 2856 bp->state = BNX2X_STATE_CLOSED;
55c11941 2857 bp->cnic_loaded = false;
9f6c9258 2858
c9ee9206
VZ
2859 /* Check if there are pending parity attentions. If there are - set
2860 * RECOVERY_IN_PROGRESS.
2861 */
ad5afc89 2862 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
c9ee9206
VZ
2863 bnx2x_set_reset_in_progress(bp);
2864
2865 /* Set RESET_IS_GLOBAL if needed */
2866 if (global)
2867 bnx2x_set_reset_global(bp);
2868 }
2869
2870
9f6c9258
DK
2871 /* The last driver must disable a "close the gate" if there is no
2872 * parity attention or "process kill" pending.
2873 */
ad5afc89
AE
2874 if (IS_PF(bp) &&
2875 !bnx2x_clear_pf_load(bp) &&
2876 bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2877 bnx2x_disable_close_the_gate(bp);
2878
55c11941
MS
2879 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2880
9f6c9258
DK
2881 return 0;
2882}
f85582f8 2883
9f6c9258
DK
2884int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2885{
2886 u16 pmcsr;
2887
adf5f6a1
DK
2888 /* If there is no power capability, silently succeed */
2889 if (!bp->pm_cap) {
51c1a580 2890 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
2891 return 0;
2892 }
2893
9f6c9258
DK
2894 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2895
2896 switch (state) {
2897 case PCI_D0:
2898 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2899 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2900 PCI_PM_CTRL_PME_STATUS));
2901
2902 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2903 /* delay required during transition out of D3hot */
2904 msleep(20);
2905 break;
2906
2907 case PCI_D3hot:
2908 /* If there are other clients above, don't
2909 shut down the power */
2910 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2911 return 0;
2912 /* Don't shut down the power for emulation and FPGA */
2913 if (CHIP_REV_IS_SLOW(bp))
2914 return 0;
2915
2916 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2917 pmcsr |= 3;
2918
2919 if (bp->wol)
2920 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2921
2922 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2923 pmcsr);
2924
2925 /* No more memory access after this point until
2926 * device is brought back to D0.
2927 */
2928 break;
2929
2930 default:
51c1a580 2931 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
2932 return -EINVAL;
2933 }
2934 return 0;
2935}
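
/* Editorial sketch (not part of the driver): the D3hot branch above just
 * rewrites the two low power-state bits of PMCSR and optionally sets the
 * PME-enable bit for Wake-on-LAN. A hypothetical helper over a raw PMCSR
 * value, with the state mask and target state passed in explicitly:
 */
static inline unsigned short
bnx2x_example_pmcsr_set_state(unsigned short pmcsr, unsigned short state_mask,
			      unsigned short state, int wol,
			      unsigned short pme_enable_bit)
{
	pmcsr &= ~state_mask;		/* clear the current power state */
	pmcsr |= state;			/* e.g. 3 for D3hot */
	if (wol)
		pmcsr |= pme_enable_bit;/* allow PME to wake the system */
	return pmcsr;
}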
2936
9f6c9258
DK
2937/*
2938 * net_device service functions
2939 */
d6214d7a 2940int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
2941{
2942 int work_done = 0;
6383c0b3 2943 u8 cos;
9f6c9258
DK
2944 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2945 napi);
2946 struct bnx2x *bp = fp->bp;
2947
2948 while (1) {
2949#ifdef BNX2X_STOP_ON_ERROR
2950 if (unlikely(bp->panic)) {
2951 napi_complete(napi);
2952 return 0;
2953 }
2954#endif
2955
6383c0b3 2956 for_each_cos_in_tx_queue(fp, cos)
65565884
MS
2957 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
2958 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
6383c0b3 2959
9f6c9258
DK
2960
2961 if (bnx2x_has_rx_work(fp)) {
2962 work_done += bnx2x_rx_int(fp, budget - work_done);
2963
2964 /* must not complete if we consumed full budget */
2965 if (work_done >= budget)
2966 break;
2967 }
2968
2969 /* Fall out from the NAPI loop if needed */
2970 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
55c11941 2971
ec6ba945
VZ
2972 /* No need to update SB for FCoE L2 ring as long as
2973 * it's connected to the default SB and the SB
2974 * has been updated when NAPI was scheduled.
2975 */
2976 if (IS_FCOE_FP(fp)) {
2977 napi_complete(napi);
2978 break;
2979 }
9f6c9258 2980 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
2981 /* bnx2x_has_rx_work() reads the status block,
2982 * thus we need to ensure that status block indices
2983 * have been actually read (bnx2x_update_fpsb_idx)
2984 * prior to this check (bnx2x_has_rx_work) so that
2985 * we won't write the "newer" value of the status block
2986 * to IGU (if there was a DMA right after
2987 * bnx2x_has_rx_work and if there is no rmb, the memory
2988 * reading (bnx2x_update_fpsb_idx) may be postponed
2989 * to right before bnx2x_ack_sb). In this case there
2990 * will never be another interrupt until there is
2991 * another update of the status block, while there
2992 * is still unhandled work.
2993 */
9f6c9258
DK
2994 rmb();
2995
2996 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2997 napi_complete(napi);
2998 /* Re-enable interrupts */
51c1a580 2999 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
3000 "Update index to %d\n", fp->fp_hc_idx);
3001 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3002 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
3003 IGU_INT_ENABLE, 1);
3004 break;
3005 }
3006 }
3007 }
3008
3009 return work_done;
3010}
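
/* Editorial sketch (not part of the driver): simplified, the exit condition
 * of the NAPI loop above is "complete only if the budget was not exhausted
 * AND a re-check after the barrier still shows no Rx/Tx work" (the FCoE ring
 * is a special case handled separately). A hypothetical predicate restating
 * that decision:
 */
static inline int bnx2x_example_napi_should_complete(int work_done, int budget,
						     int has_rx_work,
						     int has_tx_work)
{
	if (work_done >= budget)
		return 0;	/* budget consumed: stay scheduled */
	return !(has_rx_work || has_tx_work);
}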
3011
9f6c9258
DK
3012/* we split the first BD into headers and data BDs
3013 * to ease the pain of our fellow microcode engineers;
3014 * we use one mapping for both BDs
9f6c9258
DK
3015 */
3016static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
6383c0b3 3017 struct bnx2x_fp_txdata *txdata,
9f6c9258
DK
3018 struct sw_tx_bd *tx_buf,
3019 struct eth_tx_start_bd **tx_bd, u16 hlen,
3020 u16 bd_prod, int nbd)
3021{
3022 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3023 struct eth_tx_bd *d_tx_bd;
3024 dma_addr_t mapping;
3025 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3026
3027 /* first fix first BD */
3028 h_tx_bd->nbd = cpu_to_le16(nbd);
3029 h_tx_bd->nbytes = cpu_to_le16(hlen);
3030
51c1a580
MS
3031 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
3032 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
9f6c9258
DK
3033
3034 /* now get a new data BD
3035 * (after the pbd) and fill it */
3036 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3037 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
3038
3039 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3040 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3041
3042 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3043 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3044 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3045
3046 /* this marks the BD as one that has no individual mapping */
3047 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3048
3049 DP(NETIF_MSG_TX_QUEUED,
3050 "TSO split data size is %d (%x:%x)\n",
3051 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3052
3053 /* update tx_bd */
3054 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3055
3056 return bd_prod;
3057}
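
/* Editorial sketch (not part of the driver): the TSO split above keeps one
 * DMA mapping and simply carves it in two - the start BD keeps the first
 * hlen bytes and the new data BD covers the remainder starting at
 * (mapping + hlen). Hypothetical helpers for the data BD's address and
 * length:
 */
static inline unsigned long long
bnx2x_example_split_data_addr(unsigned long long mapping, unsigned short hlen)
{
	return mapping + hlen;		/* data starts right after the headers */
}

static inline unsigned short
bnx2x_example_split_data_len(unsigned short old_len, unsigned short hlen)
{
	return old_len - hlen;		/* bytes left for the data BD */
}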
3058
3059static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3060{
3061 if (fix > 0)
3062 csum = (u16) ~csum_fold(csum_sub(csum,
3063 csum_partial(t_header - fix, fix, 0)));
3064
3065 else if (fix < 0)
3066 csum = (u16) ~csum_fold(csum_add(csum,
3067 csum_partial(t_header, -fix, 0)));
3068
3069 return swab16(csum);
3070}
3071
3072static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3073{
3074 u32 rc;
3075
3076 if (skb->ip_summed != CHECKSUM_PARTIAL)
3077 rc = XMIT_PLAIN;
3078
3079 else {
d0d9d8ef 3080 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
9f6c9258
DK
3081 rc = XMIT_CSUM_V6;
3082 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3083 rc |= XMIT_CSUM_TCP;
3084
3085 } else {
3086 rc = XMIT_CSUM_V4;
3087 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3088 rc |= XMIT_CSUM_TCP;
3089 }
3090 }
3091
5892b9e9
VZ
3092 if (skb_is_gso_v6(skb))
3093 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
3094 else if (skb_is_gso(skb))
3095 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
9f6c9258
DK
3096
3097 return rc;
3098}
3099
3100#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3101/* check if packet requires linearization (packet is too fragmented)
3102 no need to check fragmentation if page size > 8K (there will be no
3103 violation of FW restrictions) */
3104static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3105 u32 xmit_type)
3106{
3107 int to_copy = 0;
3108 int hlen = 0;
3109 int first_bd_sz = 0;
3110
3111 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3112 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3113
3114 if (xmit_type & XMIT_GSO) {
3115 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3116 /* Check if LSO packet needs to be copied:
3117 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
3118 int wnd_size = MAX_FETCH_BD - 3;
3119 /* Number of windows to check */
3120 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3121 int wnd_idx = 0;
3122 int frag_idx = 0;
3123 u32 wnd_sum = 0;
3124
3125 /* Headers length */
3126 hlen = (int)(skb_transport_header(skb) - skb->data) +
3127 tcp_hdrlen(skb);
3128
3129 /* Amount of data (w/o headers) on linear part of SKB*/
3130 first_bd_sz = skb_headlen(skb) - hlen;
3131
3132 wnd_sum = first_bd_sz;
3133
3134 /* Calculate the first sum - it's special */
3135 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3136 wnd_sum +=
9e903e08 3137 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
3138
3139 /* If there was data on linear skb data - check it */
3140 if (first_bd_sz > 0) {
3141 if (unlikely(wnd_sum < lso_mss)) {
3142 to_copy = 1;
3143 goto exit_lbl;
3144 }
3145
3146 wnd_sum -= first_bd_sz;
3147 }
3148
3149 /* Others are easier: run through the frag list and
3150 check all windows */
3151 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3152 wnd_sum +=
9e903e08 3153 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
3154
3155 if (unlikely(wnd_sum < lso_mss)) {
3156 to_copy = 1;
3157 break;
3158 }
3159 wnd_sum -=
9e903e08 3160 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
3161 }
3162 } else {
3163 /* in the non-LSO case a too fragmented packet should always
3164 be linearized */
3165 to_copy = 1;
3166 }
3167 }
3168
3169exit_lbl:
3170 if (unlikely(to_copy))
3171 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3172 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
3173 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3174 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3175
3176 return to_copy;
3177}
3178#endif
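
/* Editorial sketch (not part of the driver): the linearization test above is
 * a sliding-window check - every run of (MAX_FETCH_BD - 3) consecutive BD
 * sizes must add up to at least one MSS, otherwise the skb gets copied.
 * A hypothetical stand-alone version over a plain array of BD sizes
 * (simplified: the linear part is treated as just another entry):
 */
static inline int bnx2x_example_needs_linearization(const unsigned int *sizes,
						    int num_sizes, int wnd_size,
						    unsigned int lso_mss)
{
	unsigned int wnd_sum = 0;
	int i;

	if (num_sizes < wnd_size)
		return 0;	/* few enough BDs: no check needed */

	for (i = 0; i < wnd_size; i++)
		wnd_sum += sizes[i];

	for (i = 0; ; i++) {
		if (wnd_sum < lso_mss)
			return 1;	/* window too small: linearize */
		if (i + wnd_size >= num_sizes)
			return 0;
		wnd_sum += sizes[i + wnd_size] - sizes[i];
	}
}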
3179
2297a2da
VZ
3180static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3181 u32 xmit_type)
f2e0899f 3182{
2297a2da
VZ
3183 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3184 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3185 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f
DK
3186 if ((xmit_type & XMIT_GSO_V6) &&
3187 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2297a2da 3188 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
3189}
3190
3191/**
e8920674 3192 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 3193 *
e8920674
DK
3194 * @skb: packet skb
3195 * @pbd: parse BD
3196 * @xmit_type: xmit flags
f2e0899f
DK
3197 */
3198static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
3199 struct eth_tx_parse_bd_e1x *pbd,
3200 u32 xmit_type)
3201{
3202 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3203 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
3204 pbd->tcp_flags = pbd_tcp_flags(skb);
3205
3206 if (xmit_type & XMIT_GSO_V4) {
3207 pbd->ip_id = swab16(ip_hdr(skb)->id);
3208 pbd->tcp_pseudo_csum =
3209 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3210 ip_hdr(skb)->daddr,
3211 0, IPPROTO_TCP, 0));
3212
3213 } else
3214 pbd->tcp_pseudo_csum =
3215 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3216 &ipv6_hdr(skb)->daddr,
3217 0, IPPROTO_TCP, 0));
3218
3219 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
3220}
f85582f8 3221
f2e0899f 3222/**
e8920674 3223 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 3224 *
e8920674
DK
3225 * @bp: driver handle
3226 * @skb: packet skb
3227 * @parsing_data: data to be updated
3228 * @xmit_type: xmit flags
f2e0899f 3229 *
e8920674 3230 * 57712 related
f2e0899f
DK
3231 */
3232static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2297a2da 3233 u32 *parsing_data, u32 xmit_type)
f2e0899f 3234{
e39aece7
VZ
3235 *parsing_data |=
3236 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3237 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
3238 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
f2e0899f 3239
e39aece7
VZ
3240 if (xmit_type & XMIT_CSUM_TCP) {
3241 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3242 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3243 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 3244
e39aece7
VZ
3245 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3246 } else
3247 /* We support checksum offload for TCP and UDP only.
3248 * No need to pass the UDP header length - it's a constant.
3249 */
3250 return skb_transport_header(skb) +
3251 sizeof(struct udphdr) - skb->data;
f2e0899f
DK
3252}
3253
93ef5c02
DK
3254static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3255 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
3256{
93ef5c02
DK
3257 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3258
3259 if (xmit_type & XMIT_CSUM_V4)
3260 tx_start_bd->bd_flags.as_bitfield |=
3261 ETH_TX_BD_FLAGS_IP_CSUM;
3262 else
3263 tx_start_bd->bd_flags.as_bitfield |=
3264 ETH_TX_BD_FLAGS_IPV6;
3265
3266 if (!(xmit_type & XMIT_CSUM_TCP))
3267 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
3268}
3269
f2e0899f 3270/**
e8920674 3271 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 3272 *
e8920674
DK
3273 * @bp: driver handle
3274 * @skb: packet skb
3275 * @pbd: parse BD to be updated
3276 * @xmit_type: xmit flags
f2e0899f
DK
3277 */
3278static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3279 struct eth_tx_parse_bd_e1x *pbd,
3280 u32 xmit_type)
3281{
e39aece7 3282 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
3283
3284 /* for now NS flag is not used in Linux */
3285 pbd->global_data =
3286 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3287 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3288
3289 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 3290 skb_network_header(skb)) >> 1;
f2e0899f 3291
e39aece7
VZ
3292 hlen += pbd->ip_hlen_w;
3293
3294 /* We support checksum offload for TCP and UDP only */
3295 if (xmit_type & XMIT_CSUM_TCP)
3296 hlen += tcp_hdrlen(skb) / 2;
3297 else
3298 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
3299
3300 pbd->total_hlen_w = cpu_to_le16(hlen);
3301 hlen = hlen*2;
3302
3303 if (xmit_type & XMIT_CSUM_TCP) {
3304 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
3305
3306 } else {
3307 s8 fix = SKB_CS_OFF(skb); /* signed! */
3308
3309 DP(NETIF_MSG_TX_QUEUED,
3310 "hlen %d fix %d csum before fix %x\n",
3311 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3312
3313 /* HW bug: fixup the CSUM */
3314 pbd->tcp_pseudo_csum =
3315 bnx2x_csum_fix(skb_transport_header(skb),
3316 SKB_CS(skb), fix);
3317
3318 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3319 pbd->tcp_pseudo_csum);
3320 }
3321
3322 return hlen;
3323}
f85582f8 3324
9f6c9258
DK
3325/* called with netif_tx_lock
3326 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3327 * netif_wake_queue()
3328 */
3329netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3330{
3331 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 3332
9f6c9258 3333 struct netdev_queue *txq;
6383c0b3 3334 struct bnx2x_fp_txdata *txdata;
9f6c9258 3335 struct sw_tx_bd *tx_buf;
619c5cb6 3336 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 3337 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 3338 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 3339 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2297a2da 3340 u32 pbd_e2_parsing_data = 0;
9f6c9258 3341 u16 pkt_prod, bd_prod;
65565884 3342 int nbd, txq_index;
9f6c9258
DK
3343 dma_addr_t mapping;
3344 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3345 int i;
3346 u8 hlen = 0;
3347 __le16 pkt_size = 0;
3348 struct ethhdr *eth;
3349 u8 mac_type = UNICAST_ADDRESS;
3350
3351#ifdef BNX2X_STOP_ON_ERROR
3352 if (unlikely(bp->panic))
3353 return NETDEV_TX_BUSY;
3354#endif
3355
6383c0b3
AE
3356 txq_index = skb_get_queue_mapping(skb);
3357 txq = netdev_get_tx_queue(dev, txq_index);
3358
55c11941 3359 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
6383c0b3 3360
65565884 3361 txdata = &bp->bnx2x_txq[txq_index];
6383c0b3
AE
3362
3363 /* enable this debug print to view the transmission queue being used
51c1a580 3364 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 3365 txq_index, fp_index, txdata_index); */
9f6c9258 3366
6383c0b3 3367 /* enable this debug print to view the tranmission details
51c1a580
MS
3368 DP(NETIF_MSG_TX_QUEUED,
3369 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 3370 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 3371
6383c0b3 3372 if (unlikely(bnx2x_tx_avail(bp, txdata) <
7df2dc6b
DK
3373 skb_shinfo(skb)->nr_frags +
3374 BDS_PER_TX_PKT +
3375 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2384d6aa 3376 /* Handle special storage cases separately */
c96bdc0c
DK
3377 if (txdata->tx_ring_size == 0) {
3378 struct bnx2x_eth_q_stats *q_stats =
3379 bnx2x_fp_qstats(bp, txdata->parent_fp);
3380 q_stats->driver_filtered_tx_pkt++;
3381 dev_kfree_skb(skb);
3382 return NETDEV_TX_OK;
3383 }
2384d6aa
DK
3384 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3385 netif_tx_stop_queue(txq);
c96bdc0c 3386 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2384d6aa 3387
9f6c9258
DK
3388 return NETDEV_TX_BUSY;
3389 }
3390
51c1a580
MS
3391 DP(NETIF_MSG_TX_QUEUED,
3392 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
6383c0b3 3393 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9f6c9258
DK
3394 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
3395
3396 eth = (struct ethhdr *)skb->data;
3397
3398 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
3399 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3400 if (is_broadcast_ether_addr(eth->h_dest))
3401 mac_type = BROADCAST_ADDRESS;
3402 else
3403 mac_type = MULTICAST_ADDRESS;
3404 }
3405
3406#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
3407 /* First, check if we need to linearize the skb (due to FW
3408 restrictions). No need to check fragmentation if page size > 8K
3409 (there will be no violation of FW restrictions) */
3410 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3411 /* Statistics of linearization */
3412 bp->lin_cnt++;
3413 if (skb_linearize(skb) != 0) {
51c1a580
MS
3414 DP(NETIF_MSG_TX_QUEUED,
3415 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
3416 dev_kfree_skb_any(skb);
3417 return NETDEV_TX_OK;
3418 }
3419 }
3420#endif
619c5cb6
VZ
3421 /* Map skb linear data for DMA */
3422 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3423 skb_headlen(skb), DMA_TO_DEVICE);
3424 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
3425 DP(NETIF_MSG_TX_QUEUED,
3426 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
3427 dev_kfree_skb_any(skb);
3428 return NETDEV_TX_OK;
3429 }
9f6c9258
DK
3430 /*
3431 Please read carefully. First we use one BD which we mark as start,
3432 then we have a parsing info BD (used for TSO or xsum),
3433 and only then we have the rest of the TSO BDs.
3434 (don't forget to mark the last one as last,
3435 and to unmap only AFTER you write to the BD ...)
3436 And above all, all pbd sizes are in words - NOT DWORDS!
3437 */
3438
619c5cb6
VZ
3439 /* get current pkt produced now - advance it just before sending packet
3440 * since mapping of pages may fail and cause packet to be dropped
3441 */
6383c0b3
AE
3442 pkt_prod = txdata->tx_pkt_prod;
3443 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 3444
619c5cb6
VZ
3445 /* get a tx_buf and first BD
3446 * tx_start_bd may be changed during SPLIT,
3447 * but first_bd will always stay first
3448 */
6383c0b3
AE
3449 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3450 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 3451 first_bd = tx_start_bd;
9f6c9258
DK
3452
3453 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
96bed4b9
YM
3454 SET_FLAG(tx_start_bd->general_data,
3455 ETH_TX_START_BD_PARSE_NBDS,
3456 0);
f85582f8 3457
9f6c9258 3458 /* header nbd */
f85582f8 3459 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
9f6c9258
DK
3460
3461 /* remember the first BD of the packet */
6383c0b3 3462 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
3463 tx_buf->skb = skb;
3464 tx_buf->flags = 0;
3465
3466 DP(NETIF_MSG_TX_QUEUED,
3467 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 3468 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 3469
eab6d18d 3470 if (vlan_tx_tag_present(skb)) {
523224a3
DK
3471 tx_start_bd->vlan_or_ethertype =
3472 cpu_to_le16(vlan_tx_tag_get(skb));
3473 tx_start_bd->bd_flags.as_bitfield |=
3474 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
9f6c9258 3475 } else
523224a3 3476 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
9f6c9258
DK
3477
3478 /* turn on parsing and get a BD */
3479 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 3480
93ef5c02
DK
3481 if (xmit_type & XMIT_CSUM)
3482 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 3483
619c5cb6 3484 if (!CHIP_IS_E1x(bp)) {
6383c0b3 3485 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f
DK
3486 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3487 /* Set PBD in checksum offload case */
3488 if (xmit_type & XMIT_CSUM)
2297a2da
VZ
3489 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3490 &pbd_e2_parsing_data,
3491 xmit_type);
619c5cb6
VZ
3492 if (IS_MF_SI(bp)) {
3493 /*
3494 * fill in the MAC addresses in the PBD - for local
3495 * switching
3496 */
3497 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
3498 &pbd_e2->src_mac_addr_mid,
3499 &pbd_e2->src_mac_addr_lo,
3500 eth->h_source);
3501 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
3502 &pbd_e2->dst_mac_addr_mid,
3503 &pbd_e2->dst_mac_addr_lo,
3504 eth->h_dest);
3505 }
96bed4b9
YM
3506
3507 SET_FLAG(pbd_e2_parsing_data,
3508 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
f2e0899f 3509 } else {
96bed4b9 3510 u16 global_data = 0;
6383c0b3 3511 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
3512 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3513 /* Set PBD in checksum offload case */
3514 if (xmit_type & XMIT_CSUM)
3515 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 3516
96bed4b9
YM
3517 SET_FLAG(global_data,
3518 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3519 pbd_e1x->global_data |= cpu_to_le16(global_data);
9f6c9258
DK
3520 }
3521
f85582f8 3522 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
3523 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3524 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
619c5cb6 3525 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
9f6c9258
DK
3526 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3527 pkt_size = tx_start_bd->nbytes;
3528
51c1a580
MS
3529 DP(NETIF_MSG_TX_QUEUED,
3530 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
9f6c9258
DK
3531 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3532 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
3533 tx_start_bd->bd_flags.as_bitfield,
3534 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
3535
3536 if (xmit_type & XMIT_GSO) {
3537
3538 DP(NETIF_MSG_TX_QUEUED,
3539 "TSO packet len %d hlen %d total len %d tso size %d\n",
3540 skb->len, hlen, skb_headlen(skb),
3541 skb_shinfo(skb)->gso_size);
3542
3543 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3544
3545 if (unlikely(skb_headlen(skb) > hlen))
6383c0b3
AE
3546 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3547 &tx_start_bd, hlen,
3548 bd_prod, ++nbd);
619c5cb6 3549 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
3550 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3551 xmit_type);
f2e0899f
DK
3552 else
3553 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 3554 }
2297a2da
VZ
3555
3556 /* Set the PBD's parsing_data field if not zero
3557 * (for the chips newer than 57711).
3558 */
3559 if (pbd_e2_parsing_data)
3560 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3561
9f6c9258
DK
3562 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3563
f85582f8 3564 /* Handle fragmented skb */
9f6c9258
DK
3565 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3566 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3567
9e903e08
ED
3568 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
3569 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 3570 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 3571 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 3572
51c1a580
MS
3573 DP(NETIF_MSG_TX_QUEUED,
3574 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
3575
3576 /* we need unmap all buffers already mapped
3577 * for this SKB;
3578 * first_bd->nbd need to be properly updated
3579 * before call to bnx2x_free_tx_pkt
3580 */
3581 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 3582 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
3583 TX_BD(txdata->tx_pkt_prod),
3584 &pkts_compl, &bytes_compl);
619c5cb6
VZ
3585 return NETDEV_TX_OK;
3586 }
3587
9f6c9258 3588 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 3589 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3590 if (total_pkt_bd == NULL)
6383c0b3 3591 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 3592
9f6c9258
DK
3593 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3594 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
3595 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
3596 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 3597 nbd++;
9f6c9258
DK
3598
3599 DP(NETIF_MSG_TX_QUEUED,
3600 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
3601 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
3602 le16_to_cpu(tx_data_bd->nbytes));
3603 }
3604
3605 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
3606
619c5cb6
VZ
3607 /* update with actual num BDs */
3608 first_bd->nbd = cpu_to_le16(nbd);
3609
9f6c9258
DK
3610 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3611
3612 /* now send a tx doorbell, counting the next BD
3613 * if the packet contains or ends with it
3614 */
3615 if (TX_BD_POFF(bd_prod) < nbd)
3616 nbd++;
3617
619c5cb6
VZ
3618 /* total_pkt_bytes should be set on the first data BD if
3619 * it's not an LSO packet and there is more than one
3620 * data BD. In this case pkt_size is limited by an MTU value.
3621 * However we prefer to set it for an LSO packet (while we don't
 3622 * have to) in order to save some CPU cycles in a non-LSO
 3623 * case, when we care much more about them.
3624 */
9f6c9258
DK
3625 if (total_pkt_bd != NULL)
3626 total_pkt_bd->total_pkt_bytes = pkt_size;
3627
523224a3 3628 if (pbd_e1x)
9f6c9258 3629 DP(NETIF_MSG_TX_QUEUED,
51c1a580 3630 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
3631 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
3632 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
3633 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
3634 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
3635 if (pbd_e2)
3636 DP(NETIF_MSG_TX_QUEUED,
3637 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
3638 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
3639 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
3640 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
3641 pbd_e2->parsing_data);
9f6c9258
DK
3642 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
3643
2df1a70a
TH
3644 netdev_tx_sent_queue(txq, skb->len);
3645
8373c57d
WB
3646 skb_tx_timestamp(skb);
3647
6383c0b3 3648 txdata->tx_pkt_prod++;
9f6c9258
DK
3649 /*
3650 * Make sure that the BD data is updated before updating the producer
3651 * since FW might read the BD right after the producer is updated.
3652 * This is only applicable for weak-ordered memory model archs such
3653 * as IA-64. The following barrier is also mandatory since FW will
3654 * assumes packets must have BDs.
3655 */
3656 wmb();
3657
6383c0b3 3658 txdata->tx_db.data.prod += nbd;
9f6c9258 3659 barrier();
f85582f8 3660
6383c0b3 3661 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
3662
3663 mmiowb();
3664
6383c0b3 3665 txdata->tx_bd_prod += nbd;
9f6c9258 3666
7df2dc6b 3667 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
9f6c9258
DK
3668 netif_tx_stop_queue(txq);
3669
3670 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
3671 * ordering of set_bit() in netif_tx_stop_queue() and read of
3672 * fp->bd_tx_cons */
3673 smp_mb();
3674
15192a8c 3675 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
7df2dc6b 3676 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
9f6c9258
DK
3677 netif_tx_wake_queue(txq);
3678 }
6383c0b3 3679 txdata->tx_pkt++;
9f6c9258
DK
3680
3681 return NETDEV_TX_OK;
3682}
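
	/* Editorial aside: a minimal sketch of the per-packet BD accounting the
	 * transmit path above performs. The helper name and its arguments are
	 * invented for illustration and are not part of the driver.
	 */
	static inline unsigned int sketch_tx_bds_per_pkt(unsigned int nr_frags,
							 bool tso_hdr_split,
							 bool crosses_page)
	{
		unsigned int nbd = 2;	/* start BD + parsing BD */

		nbd += nr_frags;	/* one data BD per page fragment */
		if (tso_hdr_split)
			nbd++;		/* extra BD produced by the header split */
		if (crosses_page)
			nbd++;		/* "next page" BD counted for the doorbell */

		return nbd;
	}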
f85582f8 3683
6383c0b3
AE
3684/**
3685 * bnx2x_setup_tc - routine to configure net_device for multi tc
3686 *
3687 * @netdev: net device to configure
3688 * @tc: number of traffic classes to enable
3689 *
3690 * callback connected to the ndo_setup_tc function pointer
3691 */
3692int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3693{
3694 int cos, prio, count, offset;
3695 struct bnx2x *bp = netdev_priv(dev);
3696
3697 /* setup tc must be called under rtnl lock */
3698 ASSERT_RTNL();
3699
3700 /* no traffic classes requested. aborting */
3701 if (!num_tc) {
3702 netdev_reset_tc(dev);
3703 return 0;
3704 }
3705
3706 /* requested to support too many traffic classes */
3707 if (num_tc > bp->max_cos) {
51c1a580
MS
3708 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3709 num_tc, bp->max_cos);
6383c0b3
AE
3710 return -EINVAL;
3711 }
3712
3713 /* declare amount of supported traffic classes */
3714 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 3715 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
3716 return -EINVAL;
3717 }
3718
3719 /* configure priority to traffic class mapping */
3720 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3721 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
3722 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3723 "mapping priority %d to tc %d\n",
6383c0b3
AE
3724 prio, bp->prio_to_cos[prio]);
3725 }
3726
3727
 3728 /* Use this configuration to differentiate tc0 from other COSes
 3729 This can be used for ets or pfc, and save the effort of setting
 3730 up a multi-class queue disc or negotiating DCBX with a switch
3731 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 3732 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
3733 for (prio = 1; prio < 16; prio++) {
3734 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 3735 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
3736 } */
3737
3738 /* configure traffic class to transmission queue mapping */
3739 for (cos = 0; cos < bp->max_cos; cos++) {
3740 count = BNX2X_NUM_ETH_QUEUES(bp);
65565884 3741 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
6383c0b3 3742 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
3743 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3744 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
3745 cos, offset, count);
3746 }
3747
3748 return 0;
3749}
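
	/* Editorial aside: a small standalone example of the tc-to-queue
	 * arithmetic configured above, assuming four ethernet queues, three
	 * classes and no CNIC queues (so the count and offset strides
	 * coincide). All values are assumptions for the example.
	 */
	#include <stdio.h>

	int main(void)
	{
		const int num_eth_queues = 4;	/* assumed BNX2X_NUM_ETH_QUEUES() */
		const int max_cos = 3;		/* assumed bp->max_cos */
		int cos;

		for (cos = 0; cos < max_cos; cos++) {
			int count = num_eth_queues;
			int offset = cos * num_eth_queues;

			printf("tc %d -> queues [%d..%d]\n",
			       cos, offset, offset + count - 1);
		}
		return 0;
	}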
3750
9f6c9258
DK
3751/* called with rtnl_lock */
3752int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3753{
3754 struct sockaddr *addr = p;
3755 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 3756 int rc = 0;
9f6c9258 3757
51c1a580
MS
3758 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3759 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 3760 return -EINVAL;
51c1a580 3761 }
614c76df 3762
a3348722
BW
3763 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3764 !is_zero_ether_addr(addr->sa_data)) {
51c1a580 3765 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 3766 return -EINVAL;
51c1a580 3767 }
9f6c9258 3768
619c5cb6
VZ
3769 if (netif_running(dev)) {
3770 rc = bnx2x_set_eth_mac(bp, false);
3771 if (rc)
3772 return rc;
3773 }
3774
7ce5d222 3775 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
9f6c9258 3776 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 3777
523224a3 3778 if (netif_running(dev))
619c5cb6 3779 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 3780
619c5cb6 3781 return rc;
9f6c9258
DK
3782}
3783
b3b83c3f
DK
3784static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3785{
3786 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3787 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 3788 u8 cos;
b3b83c3f
DK
3789
3790 /* Common */
55c11941 3791
b3b83c3f
DK
3792 if (IS_FCOE_IDX(fp_index)) {
3793 memset(sb, 0, sizeof(union host_hc_status_block));
3794 fp->status_blk_mapping = 0;
b3b83c3f 3795 } else {
b3b83c3f 3796 /* status blocks */
619c5cb6 3797 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3798 BNX2X_PCI_FREE(sb->e2_sb,
3799 bnx2x_fp(bp, fp_index,
3800 status_blk_mapping),
3801 sizeof(struct host_hc_status_block_e2));
3802 else
3803 BNX2X_PCI_FREE(sb->e1x_sb,
3804 bnx2x_fp(bp, fp_index,
3805 status_blk_mapping),
3806 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 3807 }
55c11941 3808
b3b83c3f
DK
3809 /* Rx */
3810 if (!skip_rx_queue(bp, fp_index)) {
3811 bnx2x_free_rx_bds(fp);
3812
3813 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3814 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3815 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3816 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3817 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3818
3819 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3820 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3821 sizeof(struct eth_fast_path_rx_cqe) *
3822 NUM_RCQ_BD);
3823
3824 /* SGE ring */
3825 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3826 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3827 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3828 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3829 }
3830
3831 /* Tx */
3832 if (!skip_tx_queue(bp, fp_index)) {
3833 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 3834 for_each_cos_in_tx_queue(fp, cos) {
65565884 3835 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 3836
51c1a580 3837 DP(NETIF_MSG_IFDOWN,
94f05b0f 3838 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
3839 fp_index, cos, txdata->cid);
3840
3841 BNX2X_FREE(txdata->tx_buf_ring);
3842 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3843 txdata->tx_desc_mapping,
3844 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3845 }
b3b83c3f
DK
3846 }
3847 /* end of fastpath */
3848}
3849
55c11941
MS
3850void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3851{
3852 int i;
3853 for_each_cnic_queue(bp, i)
3854 bnx2x_free_fp_mem_at(bp, i);
3855}
3856
b3b83c3f
DK
3857void bnx2x_free_fp_mem(struct bnx2x *bp)
3858{
3859 int i;
55c11941 3860 for_each_eth_queue(bp, i)
b3b83c3f
DK
3861 bnx2x_free_fp_mem_at(bp, i);
3862}
3863
1191cb83 3864static void set_sb_shortcuts(struct bnx2x *bp, int index)
b3b83c3f
DK
3865{
3866 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 3867 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
3868 bnx2x_fp(bp, index, sb_index_values) =
3869 (__le16 *)status_blk.e2_sb->sb.index_values;
3870 bnx2x_fp(bp, index, sb_running_index) =
3871 (__le16 *)status_blk.e2_sb->sb.running_index;
3872 } else {
3873 bnx2x_fp(bp, index, sb_index_values) =
3874 (__le16 *)status_blk.e1x_sb->sb.index_values;
3875 bnx2x_fp(bp, index, sb_running_index) =
3876 (__le16 *)status_blk.e1x_sb->sb.running_index;
3877 }
3878}
3879
1191cb83
ED
3880/* Returns the number of actually allocated BDs */
3881static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3882 int rx_ring_size)
3883{
3884 struct bnx2x *bp = fp->bp;
3885 u16 ring_prod, cqe_ring_prod;
3886 int i, failure_cnt = 0;
3887
3888 fp->rx_comp_cons = 0;
3889 cqe_ring_prod = ring_prod = 0;
3890
 3891 /* This routine is called only during fp init so
3892 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3893 */
3894 for (i = 0; i < rx_ring_size; i++) {
3895 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3896 failure_cnt++;
3897 continue;
3898 }
3899 ring_prod = NEXT_RX_IDX(ring_prod);
3900 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3901 WARN_ON(ring_prod <= (i - failure_cnt));
3902 }
3903
3904 if (failure_cnt)
3905 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3906 i - failure_cnt, fp->index);
3907
3908 fp->rx_bd_prod = ring_prod;
3909 /* Limit the CQE producer by the CQE ring size */
3910 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3911 cqe_ring_prod);
3912 fp->rx_pkt = fp->rx_calls = 0;
3913
15192a8c 3914 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1191cb83
ED
3915
3916 return i - failure_cnt;
3917}
3918
3919static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3920{
3921 int i;
3922
3923 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3924 struct eth_rx_cqe_next_page *nextpg;
3925
3926 nextpg = (struct eth_rx_cqe_next_page *)
3927 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3928 nextpg->addr_hi =
3929 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3930 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3931 nextpg->addr_lo =
3932 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3933 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3934 }
3935}
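
	/* Editorial aside: a generic, kernel-style sketch of the "next page"
	 * chaining done above (the RX BD ring setup is analogous): the last
	 * entry of each ring page stores the DMA address of the following
	 * page, and the final page wraps back to the first, forming one
	 * logical ring. The struct and helper names are invented for the
	 * sketch and are not the driver's.
	 */
	struct sketch_next_page { __le32 addr_hi; __le32 addr_lo; };

	static void sketch_chain_ring_pages(void *ring, dma_addr_t ring_dma,
					    unsigned int page_size,
					    unsigned int entries_per_page,
					    unsigned int entry_size,
					    unsigned int num_pages)
	{
		unsigned int i;

		for (i = 1; i <= num_pages; i++) {
			/* last entry of page (i - 1) ... */
			struct sketch_next_page *np =
				ring + (i * entries_per_page - 1) * entry_size;
			/* ... points at page (i % num_pages), wrapping to page 0 */
			dma_addr_t next = ring_dma + page_size * (i % num_pages);

			np->addr_hi = cpu_to_le32(upper_32_bits(next));
			np->addr_lo = cpu_to_le32(lower_32_bits(next));
		}
	}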
3936
b3b83c3f
DK
3937static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3938{
3939 union host_hc_status_block *sb;
3940 struct bnx2x_fastpath *fp = &bp->fp[index];
3941 int ring_size = 0;
6383c0b3 3942 u8 cos;
c2188952 3943 int rx_ring_size = 0;
b3b83c3f 3944
a3348722
BW
3945 if (!bp->rx_ring_size &&
3946 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
614c76df
DK
3947 rx_ring_size = MIN_RX_SIZE_NONTPA;
3948 bp->rx_ring_size = rx_ring_size;
55c11941 3949 } else if (!bp->rx_ring_size) {
c2188952
VZ
3950 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3951
065f8b92
YM
3952 if (CHIP_IS_E3(bp)) {
3953 u32 cfg = SHMEM_RD(bp,
3954 dev_info.port_hw_config[BP_PORT(bp)].
3955 default_cfg);
3956
3957 /* Decrease ring size for 1G functions */
3958 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3959 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3960 rx_ring_size /= 10;
3961 }
d760fc37 3962
c2188952
VZ
3963 /* allocate at least number of buffers required by FW */
3964 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3965 MIN_RX_SIZE_TPA, rx_ring_size);
3966
3967 bp->rx_ring_size = rx_ring_size;
614c76df 3968 } else /* if rx_ring_size specified - use it */
c2188952 3969 rx_ring_size = bp->rx_ring_size;
b3b83c3f 3970
b3b83c3f
DK
3971 /* Common */
3972 sb = &bnx2x_fp(bp, index, status_blk);
55c11941 3973
b3b83c3f 3974 if (!IS_FCOE_IDX(index)) {
b3b83c3f 3975 /* status blocks */
619c5cb6 3976 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3977 BNX2X_PCI_ALLOC(sb->e2_sb,
3978 &bnx2x_fp(bp, index, status_blk_mapping),
3979 sizeof(struct host_hc_status_block_e2));
3980 else
3981 BNX2X_PCI_ALLOC(sb->e1x_sb,
3982 &bnx2x_fp(bp, index, status_blk_mapping),
3983 sizeof(struct host_hc_status_block_e1x));
b3b83c3f 3984 }
8eef2af1
DK
3985
3986 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3987 * set shortcuts for it.
3988 */
3989 if (!IS_FCOE_IDX(index))
3990 set_sb_shortcuts(bp, index);
b3b83c3f
DK
3991
3992 /* Tx */
3993 if (!skip_tx_queue(bp, index)) {
3994 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3 3995 for_each_cos_in_tx_queue(fp, cos) {
65565884 3996 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3 3997
51c1a580
MS
3998 DP(NETIF_MSG_IFUP,
3999 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
4000 index, cos);
4001
4002 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 4003 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
4004 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
4005 &txdata->tx_desc_mapping,
b3b83c3f 4006 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 4007 }
b3b83c3f
DK
4008 }
4009
4010 /* Rx */
4011 if (!skip_rx_queue(bp, index)) {
4012 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4013 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
4014 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4015 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
4016 &bnx2x_fp(bp, index, rx_desc_mapping),
4017 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4018
4019 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
4020 &bnx2x_fp(bp, index, rx_comp_mapping),
4021 sizeof(struct eth_fast_path_rx_cqe) *
4022 NUM_RCQ_BD);
4023
4024 /* SGE ring */
4025 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
4026 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4027 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
4028 &bnx2x_fp(bp, index, rx_sge_mapping),
4029 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4030 /* RX BD ring */
4031 bnx2x_set_next_page_rx_bd(fp);
4032
4033 /* CQ ring */
4034 bnx2x_set_next_page_rx_cq(fp);
4035
4036 /* BDs */
4037 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4038 if (ring_size < rx_ring_size)
4039 goto alloc_mem_err;
4040 }
4041
4042 return 0;
4043
4044/* handles low memory cases */
4045alloc_mem_err:
4046 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4047 index, ring_size);
 4048 /* FW will drop all packets if queue is not big enough;
 4049 * in these cases we disable the queue
6383c0b3 4050 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
4051 */
4052 if (ring_size < (fp->disable_tpa ?
eb722d7a 4053 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
4054 /* release memory allocated for this queue */
4055 bnx2x_free_fp_mem_at(bp, index);
4056 return -ENOMEM;
4057 }
4058 return 0;
4059}
4060
55c11941
MS
4061int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4062{
4063 if (!NO_FCOE(bp))
4064 /* FCoE */
4065 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4066 /* we will fail load process instead of mark
4067 * NO_FCOE_FLAG
4068 */
4069 return -ENOMEM;
4070
4071 return 0;
4072}
4073
b3b83c3f
DK
4074int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4075{
4076 int i;
4077
55c11941
MS
4078 /* 1. Allocate FP for leading - fatal if error
4079 * 2. Allocate RSS - fix number of queues if error
b3b83c3f
DK
4080 */
4081
4082 /* leading */
4083 if (bnx2x_alloc_fp_mem_at(bp, 0))
4084 return -ENOMEM;
6383c0b3 4085
b3b83c3f
DK
4086 /* RSS */
4087 for_each_nondefault_eth_queue(bp, i)
4088 if (bnx2x_alloc_fp_mem_at(bp, i))
4089 break;
4090
4091 /* handle memory failures */
4092 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4093 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4094
4095 WARN_ON(delta < 0);
55c11941
MS
4096 if (CNIC_SUPPORT(bp))
4097 /* move non eth FPs next to last eth FP
4098 * must be done in that order
4099 * FCOE_IDX < FWD_IDX < OOO_IDX
4100 */
b3b83c3f 4101
55c11941
MS
 4102 /* move FCoE fp even if NO_FCOE_FLAG is on */
4103 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4104 bp->num_ethernet_queues -= delta;
4105 bp->num_queues = bp->num_ethernet_queues +
4106 bp->num_cnic_queues;
b3b83c3f
DK
4107 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4108 bp->num_queues + delta, bp->num_queues);
4109 }
4110
4111 return 0;
4112}
d6214d7a 4113
523224a3
DK
4114void bnx2x_free_mem_bp(struct bnx2x *bp)
4115{
15192a8c 4116 kfree(bp->fp->tpa_info);
523224a3 4117 kfree(bp->fp);
15192a8c
BW
4118 kfree(bp->sp_objs);
4119 kfree(bp->fp_stats);
65565884 4120 kfree(bp->bnx2x_txq);
523224a3
DK
4121 kfree(bp->msix_table);
4122 kfree(bp->ilt);
4123}
4124
0329aba1 4125int bnx2x_alloc_mem_bp(struct bnx2x *bp)
523224a3
DK
4126{
4127 struct bnx2x_fastpath *fp;
4128 struct msix_entry *tbl;
4129 struct bnx2x_ilt *ilt;
6383c0b3 4130 int msix_table_size = 0;
55c11941 4131 int fp_array_size, txq_array_size;
15192a8c 4132 int i;
6383c0b3
AE
4133
4134 /*
4135 * The biggest MSI-X table we might need is as a maximum number of fast
4136 * path IGU SBs plus default SB (for PF).
4137 */
1ab4434c
AE
4138 msix_table_size = bp->igu_sb_cnt;
4139 if (IS_PF(bp))
4140 msix_table_size++;
4141 BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
523224a3 4142
6383c0b3 4143 /* fp array: RSS plus CNIC related L2 queues */
55c11941 4144 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
15192a8c
BW
4145 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
4146
4147 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
523224a3
DK
4148 if (!fp)
4149 goto alloc_err;
15192a8c
BW
4150 for (i = 0; i < fp_array_size; i++) {
4151 fp[i].tpa_info =
4152 kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4153 sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4154 if (!(fp[i].tpa_info))
4155 goto alloc_err;
4156 }
4157
523224a3
DK
4158 bp->fp = fp;
4159
15192a8c
BW
4160 /* allocate sp objs */
4161 bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
4162 GFP_KERNEL);
4163 if (!bp->sp_objs)
4164 goto alloc_err;
4165
4166 /* allocate fp_stats */
4167 bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
4168 GFP_KERNEL);
4169 if (!bp->fp_stats)
4170 goto alloc_err;
4171
65565884 4172 /* Allocate memory for the transmission queues array */
55c11941
MS
4173 txq_array_size =
4174 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4175 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
4176
4177 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4178 GFP_KERNEL);
65565884
MS
4179 if (!bp->bnx2x_txq)
4180 goto alloc_err;
4181
523224a3 4182 /* msix table */
01e23742 4183 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
4184 if (!tbl)
4185 goto alloc_err;
4186 bp->msix_table = tbl;
4187
4188 /* ilt */
4189 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4190 if (!ilt)
4191 goto alloc_err;
4192 bp->ilt = ilt;
4193
4194 return 0;
4195alloc_err:
4196 bnx2x_free_mem_bp(bp);
4197 return -ENOMEM;
4198
4199}
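
	/* Editorial aside: a worked example of the sizing arithmetic in
	 * bnx2x_alloc_mem_bp() above. The IGU SB count, RSS maximum, CNIC
	 * support and number of traffic classes are assumed values, not
	 * numbers read from hardware.
	 */
	#include <stdio.h>

	int main(void)
	{
		const int igu_sb_cnt = 16;	/* assumed bp->igu_sb_cnt */
		const int is_pf = 1;		/* assumed IS_PF(bp) */
		const int max_rss = 16;		/* assumed BNX2X_MAX_RSS_COUNT() */
		const int cnic = 1;		/* assumed CNIC_SUPPORT() */
		const int tx_cos = 3;		/* assumed BNX2X_MULTI_TX_COS */

		int msix_table_size = igu_sb_cnt + (is_pf ? 1 : 0);
		int fp_array_size = max_rss + cnic;
		int txq_array_size = max_rss * tx_cos + cnic;

		printf("msix_table_size %d fp_array_size %d txq_array_size %d\n",
		       msix_table_size, fp_array_size, txq_array_size);
		return 0;
	}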
4200
a9fccec7 4201int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
4202{
4203 struct bnx2x *bp = netdev_priv(dev);
4204
4205 if (unlikely(!netif_running(dev)))
4206 return 0;
4207
5d07d868 4208 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
66371c44
MM
4209 return bnx2x_nic_load(bp, LOAD_NORMAL);
4210}
4211
1ac9e428
YR
4212int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4213{
4214 u32 sel_phy_idx = 0;
4215 if (bp->link_params.num_phys <= 1)
4216 return INT_PHY;
4217
4218 if (bp->link_vars.link_up) {
4219 sel_phy_idx = EXT_PHY1;
4220 /* In case link is SERDES, check if the EXT_PHY2 is the one */
4221 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4222 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4223 sel_phy_idx = EXT_PHY2;
4224 } else {
4225
4226 switch (bnx2x_phy_selection(&bp->link_params)) {
4227 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4228 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4229 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4230 sel_phy_idx = EXT_PHY1;
4231 break;
4232 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4233 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4234 sel_phy_idx = EXT_PHY2;
4235 break;
4236 }
4237 }
4238
4239 return sel_phy_idx;
4240
4241}
4242int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4243{
4244 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4245 /*
 4246 * The selected active PHY is always after swapping (in case PHY
4247 * swapping is enabled). So when swapping is enabled, we need to reverse
4248 * the configuration
4249 */
4250
4251 if (bp->link_params.multi_phy_config &
4252 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4253 if (sel_phy_idx == EXT_PHY1)
4254 sel_phy_idx = EXT_PHY2;
4255 else if (sel_phy_idx == EXT_PHY2)
4256 sel_phy_idx = EXT_PHY1;
4257 }
4258 return LINK_CONFIG_IDX(sel_phy_idx);
4259}
4260
55c11941 4261#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
4262int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4263{
4264 struct bnx2x *bp = netdev_priv(dev);
4265 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4266
4267 switch (type) {
4268 case NETDEV_FCOE_WWNN:
4269 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4270 cp->fcoe_wwn_node_name_lo);
4271 break;
4272 case NETDEV_FCOE_WWPN:
4273 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4274 cp->fcoe_wwn_port_name_lo);
4275 break;
4276 default:
51c1a580 4277 BNX2X_ERR("Wrong WWN type requested - %d\n", type);
bf61ee14
VZ
4278 return -EINVAL;
4279 }
4280
4281 return 0;
4282}
4283#endif
4284
9f6c9258
DK
4285/* called with rtnl_lock */
4286int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4287{
4288 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
4289
4290 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4291 BNX2X_ERR("Can't perform change MTU during parity recovery\n");
9f6c9258
DK
4292 return -EAGAIN;
4293 }
4294
4295 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
51c1a580
MS
4296 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
4297 BNX2X_ERR("Can't support requested MTU size\n");
9f6c9258 4298 return -EINVAL;
51c1a580 4299 }
9f6c9258
DK
4300
4301 /* This does not race with packet allocation
4302 * because the actual alloc size is
4303 * only updated as part of load
4304 */
4305 dev->mtu = new_mtu;
4306
66371c44
MM
4307 return bnx2x_reload_if_running(dev);
4308}
4309
c8f44aff 4310netdev_features_t bnx2x_fix_features(struct net_device *dev,
621b4d66 4311 netdev_features_t features)
66371c44
MM
4312{
4313 struct bnx2x *bp = netdev_priv(dev);
4314
4315 /* TPA requires Rx CSUM offloading */
621b4d66 4316 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
66371c44 4317 features &= ~NETIF_F_LRO;
621b4d66
DK
4318 features &= ~NETIF_F_GRO;
4319 }
66371c44
MM
4320
4321 return features;
4322}
4323
c8f44aff 4324int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
66371c44
MM
4325{
4326 struct bnx2x *bp = netdev_priv(dev);
4327 u32 flags = bp->flags;
538dd2e3 4328 bool bnx2x_reload = false;
66371c44
MM
4329
4330 if (features & NETIF_F_LRO)
4331 flags |= TPA_ENABLE_FLAG;
4332 else
4333 flags &= ~TPA_ENABLE_FLAG;
4334
621b4d66
DK
4335 if (features & NETIF_F_GRO)
4336 flags |= GRO_ENABLE_FLAG;
4337 else
4338 flags &= ~GRO_ENABLE_FLAG;
4339
538dd2e3
MB
4340 if (features & NETIF_F_LOOPBACK) {
4341 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4342 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4343 bnx2x_reload = true;
4344 }
4345 } else {
4346 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4347 bp->link_params.loopback_mode = LOOPBACK_NONE;
4348 bnx2x_reload = true;
4349 }
4350 }
4351
66371c44
MM
4352 if (flags ^ bp->flags) {
4353 bp->flags = flags;
538dd2e3
MB
4354 bnx2x_reload = true;
4355 }
66371c44 4356
538dd2e3 4357 if (bnx2x_reload) {
66371c44
MM
4358 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
4359 return bnx2x_reload_if_running(dev);
4360 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
4361 }
4362
66371c44 4363 return 0;
9f6c9258
DK
4364}
4365
4366void bnx2x_tx_timeout(struct net_device *dev)
4367{
4368 struct bnx2x *bp = netdev_priv(dev);
4369
4370#ifdef BNX2X_STOP_ON_ERROR
4371 if (!bp->panic)
4372 bnx2x_panic();
4373#endif
7be08a72
AE
4374
4375 smp_mb__before_clear_bit();
4376 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
4377 smp_mb__after_clear_bit();
4378
9f6c9258 4379 /* This allows the netif to be shutdown gracefully before resetting */
7be08a72 4380 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
4381}
4382
9f6c9258
DK
4383int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
4384{
4385 struct net_device *dev = pci_get_drvdata(pdev);
4386 struct bnx2x *bp;
4387
4388 if (!dev) {
4389 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4390 return -ENODEV;
4391 }
4392 bp = netdev_priv(dev);
4393
4394 rtnl_lock();
4395
4396 pci_save_state(pdev);
4397
4398 if (!netif_running(dev)) {
4399 rtnl_unlock();
4400 return 0;
4401 }
4402
4403 netif_device_detach(dev);
4404
5d07d868 4405 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
9f6c9258
DK
4406
4407 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
4408
4409 rtnl_unlock();
4410
4411 return 0;
4412}
4413
4414int bnx2x_resume(struct pci_dev *pdev)
4415{
4416 struct net_device *dev = pci_get_drvdata(pdev);
4417 struct bnx2x *bp;
4418 int rc;
4419
4420 if (!dev) {
4421 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
4422 return -ENODEV;
4423 }
4424 bp = netdev_priv(dev);
4425
4426 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 4427 BNX2X_ERR("Handling parity error recovery. Try again later\n");
9f6c9258
DK
4428 return -EAGAIN;
4429 }
4430
4431 rtnl_lock();
4432
4433 pci_restore_state(pdev);
4434
4435 if (!netif_running(dev)) {
4436 rtnl_unlock();
4437 return 0;
4438 }
4439
4440 bnx2x_set_power_state(bp, PCI_D0);
4441 netif_device_attach(dev);
4442
4443 rc = bnx2x_nic_load(bp, LOAD_OPEN);
4444
4445 rtnl_unlock();
4446
4447 return rc;
4448}
619c5cb6
VZ
4449
4450
4451void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
4452 u32 cid)
4453{
4454 /* ustorm cxt validation */
4455 cxt->ustorm_ag_context.cdu_usage =
4456 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4457 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
4458 /* xcontext validation */
4459 cxt->xstorm_ag_context.cdu_reserved =
4460 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
4461 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
4462}
4463
1191cb83
ED
4464static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
4465 u8 fw_sb_id, u8 sb_index,
4466 u8 ticks)
619c5cb6
VZ
4467{
4468
4469 u32 addr = BAR_CSTRORM_INTMEM +
4470 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
4471 REG_WR8(bp, addr, ticks);
51c1a580
MS
4472 DP(NETIF_MSG_IFUP,
4473 "port %x fw_sb_id %d sb_index %d ticks %d\n",
4474 port, fw_sb_id, sb_index, ticks);
619c5cb6
VZ
4475}
4476
1191cb83
ED
4477static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
4478 u16 fw_sb_id, u8 sb_index,
4479 u8 disable)
619c5cb6
VZ
4480{
4481 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
4482 u32 addr = BAR_CSTRORM_INTMEM +
4483 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
4484 u16 flags = REG_RD16(bp, addr);
4485 /* clear and set */
4486 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4487 flags |= enable_flag;
4488 REG_WR16(bp, addr, flags);
51c1a580
MS
4489 DP(NETIF_MSG_IFUP,
4490 "port %x fw_sb_id %d sb_index %d disable %d\n",
4491 port, fw_sb_id, sb_index, disable);
619c5cb6
VZ
4492}
4493
4494void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
4495 u8 sb_index, u8 disable, u16 usec)
4496{
4497 int port = BP_PORT(bp);
4498 u8 ticks = usec / BNX2X_BTR;
4499
4500 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4501
4502 disable = disable ? 1 : (usec ? 0 : 1);
4503 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4504}
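
	/* Editorial aside: a minimal sketch of the usec-to-ticks conversion
	 * and the implicit disable rule applied above. The BNX2X_BTR value
	 * used here (4 usec per tick) is an assumption for the example.
	 */
	static inline void sketch_hc_coalesce(u16 usec, u8 disable,
					      u8 *ticks, u8 *disable_out)
	{
		const u8 btr = 4;	/* assumed BNX2X_BTR (HC timer resolution) */

		*ticks = usec / btr;
		/* a zero usec value disables the index even if 'disable' was not set */
		*disable_out = disable ? 1 : (usec ? 0 : 1);
	}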