bnx2x: enable inta on the pci bus when used
[deliverable/linux.git] / drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
9f6c9258
DK
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
85b26ea1 3 * Copyright (c) 2007-2012 Broadcom Corporation
9f6c9258
DK
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
f1deab50
JP
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
9f6c9258 20#include <linux/etherdevice.h>
9bcc0893 21#include <linux/if_vlan.h>
a6b7a407 22#include <linux/interrupt.h>
9f6c9258 23#include <linux/ip.h>
f2e0899f 24#include <net/ipv6.h>
7f3e01fe 25#include <net/ip6_checksum.h>
c0cba59e 26#include <linux/prefetch.h>
9f6c9258 27#include "bnx2x_cmn.h"
523224a3 28#include "bnx2x_init.h"
042181f5 29#include "bnx2x_sp.h"
523224a3 30
619c5cb6 31
9f6c9258 32
b3b83c3f
DK
33/**
34 * bnx2x_move_fp - move content of the fastpath structure.
35 *
36 * @bp: driver handle
37 * @from: source FP index
38 * @to: destination FP index
39 *
40 * Makes sure the contents of bp->fp[to].napi are kept
72754080
AE
41 * intact. This is done by first copying the napi struct from
42 * the target to the source, and then mem copying the entire
43 * source onto the target
b3b83c3f
DK
44 */
45static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
46{
47 struct bnx2x_fastpath *from_fp = &bp->fp[from];
48 struct bnx2x_fastpath *to_fp = &bp->fp[to];
72754080
AE
49
50 /* Copy the NAPI object as it has been already initialized */
51 from_fp->napi = to_fp->napi;
52
b3b83c3f
DK
53 /* Move bnx2x_fastpath contents */
54 memcpy(to_fp, from_fp, sizeof(*to_fp));
55 to_fp->index = to;
b3b83c3f
DK
56}
57
619c5cb6
VZ
58int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
59
9f6c9258
DK
60/* free skb in the packet ring at pos idx
61 * return idx of last bd freed
62 */
6383c0b3 63static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
2df1a70a
TH
64 u16 idx, unsigned int *pkts_compl,
65 unsigned int *bytes_compl)
9f6c9258 66{
6383c0b3 67 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
9f6c9258
DK
68 struct eth_tx_start_bd *tx_start_bd;
69 struct eth_tx_bd *tx_data_bd;
70 struct sk_buff *skb = tx_buf->skb;
71 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
72 int nbd;
73
74	/* prefetch skb end pointer to speed up dev_kfree_skb() */
75 prefetch(&skb->end);
76
51c1a580 77 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 78 txdata->txq_index, idx, tx_buf, skb);
9f6c9258
DK
79
80 /* unmap first bd */
6383c0b3 81 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
9f6c9258 82 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
4bca60f4 83 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
9f6c9258 84
619c5cb6 85
9f6c9258
DK
86 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
87#ifdef BNX2X_STOP_ON_ERROR
88 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
89 BNX2X_ERR("BAD nbd!\n");
90 bnx2x_panic();
91 }
92#endif
93 new_cons = nbd + tx_buf->first_bd;
94
95 /* Get the next bd */
96 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
97
98 /* Skip a parse bd... */
99 --nbd;
100 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
101
102 /* ...and the TSO split header bd since they have no mapping */
103 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
104 --nbd;
105 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
106 }
107
108 /* now free frags */
109 while (nbd > 0) {
110
6383c0b3 111 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
9f6c9258
DK
112 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
113 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
114 if (--nbd)
115 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
116 }
117
118 /* release skb */
119 WARN_ON(!skb);
d8290ae5 120 if (likely(skb)) {
2df1a70a
TH
121 (*pkts_compl)++;
122 (*bytes_compl) += skb->len;
123 }
d8290ae5 124
40955532 125 dev_kfree_skb_any(skb);
9f6c9258
DK
126 tx_buf->first_bd = 0;
127 tx_buf->skb = NULL;
128
129 return new_cons;
130}
131
6383c0b3 132int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 133{
9f6c9258 134 struct netdev_queue *txq;
6383c0b3 135 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
2df1a70a 136 unsigned int pkts_compl = 0, bytes_compl = 0;
9f6c9258
DK
137
138#ifdef BNX2X_STOP_ON_ERROR
139 if (unlikely(bp->panic))
140 return -1;
141#endif
142
6383c0b3
AE
143 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
144 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
145 sw_cons = txdata->tx_pkt_cons;
9f6c9258
DK
146
147 while (sw_cons != hw_cons) {
148 u16 pkt_cons;
149
150 pkt_cons = TX_BD(sw_cons);
151
51c1a580
MS
152 DP(NETIF_MSG_TX_DONE,
153 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
6383c0b3 154 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
9f6c9258 155
2df1a70a
TH
156 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
157 &pkts_compl, &bytes_compl);
158
9f6c9258
DK
159 sw_cons++;
160 }
161
2df1a70a
TH
162 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
163
6383c0b3
AE
164 txdata->tx_pkt_cons = sw_cons;
165 txdata->tx_bd_cons = bd_cons;
9f6c9258
DK
166
167 /* Need to make the tx_bd_cons update visible to start_xmit()
168 * before checking for netif_tx_queue_stopped(). Without the
169 * memory barrier, there is a small possibility that
170 * start_xmit() will miss it and cause the queue to be stopped
171 * forever.
619c5cb6
VZ
172 * On the other hand we need an rmb() here to ensure the proper
173 * ordering of bit testing in the following
174 * netif_tx_queue_stopped(txq) call.
9f6c9258
DK
175 */
176 smp_mb();
177
9f6c9258
DK
178 if (unlikely(netif_tx_queue_stopped(txq))) {
179 /* Taking tx_lock() is needed to prevent reenabling the queue
180		 * while it's empty. This could have happened if rx_action() gets
181 * suspended in bnx2x_tx_int() after the condition before
182 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
183 *
184 * stops the queue->sees fresh tx_bd_cons->releases the queue->
185 * sends some packets consuming the whole queue again->
186 * stops the queue
187 */
188
189 __netif_tx_lock(txq, smp_processor_id());
190
191 if ((netif_tx_queue_stopped(txq)) &&
192 (bp->state == BNX2X_STATE_OPEN) &&
6383c0b3 193 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
9f6c9258
DK
194 netif_tx_wake_queue(txq);
195
196 __netif_tx_unlock(txq);
197 }
198 return 0;
199}
200
201static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
202 u16 idx)
203{
204 u16 last_max = fp->last_max_sge;
205
206 if (SUB_S16(idx, last_max) > 0)
207 fp->last_max_sge = idx;
208}
209
621b4d66
DK
210static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
211 u16 sge_len,
212 struct eth_end_agg_rx_cqe *cqe)
9f6c9258
DK
213{
214 struct bnx2x *bp = fp->bp;
9f6c9258
DK
215 u16 last_max, last_elem, first_elem;
216 u16 delta = 0;
217 u16 i;
218
219 if (!sge_len)
220 return;
221
222 /* First mark all used pages */
223 for (i = 0; i < sge_len; i++)
619c5cb6 224 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
621b4d66 225 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
9f6c9258
DK
226
227 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
621b4d66 228 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
229
230 /* Here we assume that the last SGE index is the biggest */
231 prefetch((void *)(fp->sge_mask));
523224a3 232 bnx2x_update_last_max_sge(fp,
621b4d66 233 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
234
235 last_max = RX_SGE(fp->last_max_sge);
619c5cb6
VZ
236 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
237 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
9f6c9258
DK
238
239 /* If ring is not full */
240 if (last_elem + 1 != first_elem)
241 last_elem++;
242
243 /* Now update the prod */
244 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
245 if (likely(fp->sge_mask[i]))
246 break;
247
619c5cb6
VZ
248 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
249 delta += BIT_VEC64_ELEM_SZ;
9f6c9258
DK
250 }
251
252 if (delta > 0) {
253 fp->rx_sge_prod += delta;
254 /* clear page-end entries */
255 bnx2x_clear_sge_mask_next_elems(fp);
256 }
257
258 DP(NETIF_MSG_RX_STATUS,
259 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
260 fp->last_max_sge, fp->rx_sge_prod);
261}
262
e52fcb24
ED
263/* Set Toeplitz hash value in the skb using the value from the
264 * CQE (calculated by HW).
265 */
266static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
267 const struct eth_fast_path_rx_cqe *cqe)
268{
269 /* Set Toeplitz hash from CQE */
270 if ((bp->dev->features & NETIF_F_RXHASH) &&
271 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
272 return le32_to_cpu(cqe->rss_hash_result);
273 return 0;
274}
275
9f6c9258 276static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
e52fcb24 277 u16 cons, u16 prod,
619c5cb6 278 struct eth_fast_path_rx_cqe *cqe)
9f6c9258
DK
279{
280 struct bnx2x *bp = fp->bp;
281 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
282 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
283 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
284 dma_addr_t mapping;
619c5cb6
VZ
285 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
286 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 287
619c5cb6
VZ
288 /* print error if current state != stop */
289 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
9f6c9258
DK
290 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
291
e52fcb24 292 /* Try to map an empty data buffer from the aggregation info */
619c5cb6 293 mapping = dma_map_single(&bp->pdev->dev,
e52fcb24 294 first_buf->data + NET_SKB_PAD,
619c5cb6
VZ
295 fp->rx_buf_size, DMA_FROM_DEVICE);
296 /*
297 * ...if it fails - move the skb from the consumer to the producer
298 * and set the current aggregation state as ERROR to drop it
299 * when TPA_STOP arrives.
300 */
301
302 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
303 /* Move the BD from the consumer to the producer */
e52fcb24 304 bnx2x_reuse_rx_data(fp, cons, prod);
619c5cb6
VZ
305 tpa_info->tpa_state = BNX2X_TPA_ERROR;
306 return;
307 }
9f6c9258 308
e52fcb24
ED
309 /* move empty data from pool to prod */
310 prod_rx_buf->data = first_buf->data;
619c5cb6 311 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
e52fcb24 312 /* point prod_bd to new data */
9f6c9258
DK
313 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
314 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
315
619c5cb6
VZ
316 /* move partial skb from cons to pool (don't unmap yet) */
317 *first_buf = *cons_rx_buf;
318
319 /* mark bin state as START */
320 tpa_info->parsing_flags =
321 le16_to_cpu(cqe->pars_flags.flags);
322 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
323 tpa_info->tpa_state = BNX2X_TPA_START;
324 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
325 tpa_info->placement_offset = cqe->placement_offset;
e52fcb24 326 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
621b4d66
DK
327 if (fp->mode == TPA_MODE_GRO) {
328 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
329 tpa_info->full_page =
330 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
fe603b4d
DK
331 /*
332 * FW 7.2.16 BUG workaround:
333		 * if SGE size is (exactly) a multiple of gro_size,
334		 * FW will place one less frag on the SGE.
335		 * The calculation is done only for potentially
336		 * dangerous MTUs.
337 */
338 if (unlikely(bp->gro_check))
339 if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
340 tpa_info->full_page -= gro_size;
621b4d66
DK
341 tpa_info->gro_size = gro_size;
342 }
619c5cb6 343
9f6c9258
DK
344#ifdef BNX2X_STOP_ON_ERROR
345 fp->tpa_queue_used |= (1 << queue);
346#ifdef _ASM_GENERIC_INT_L64_H
347 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
348#else
349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
350#endif
351 fp->tpa_queue_used);
352#endif
353}
354
e4e3c02a
VZ
355/* Timestamp option length allowed for TPA aggregation:
356 *
357 * nop nop kind length echo val
358 */
359#define TPA_TSTAMP_OPT_LEN 12
360/**
e8920674 361 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
e4e3c02a 362 *
e8920674
DK
363 * @bp: driver handle
364 * @parsing_flags: parsing flags from the START CQE
365 * @len_on_bd: total length of the first packet for the
366 * aggregation.
367 *
368 * Approximate value of the MSS for this aggregation calculated using
369 * the first packet of it.
e4e3c02a
VZ
370 */
371static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
372 u16 len_on_bd)
373{
619c5cb6
VZ
374 /*
375	 * A TPA aggregation won't have IP options, TCP options
376	 * other than the timestamp, or IPv6 extension headers.
e4e3c02a 377 */
619c5cb6
VZ
378 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
379
380 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
381 PRS_FLAG_OVERETH_IPV6)
382 hdrs_len += sizeof(struct ipv6hdr);
383 else /* IPv4 */
384 hdrs_len += sizeof(struct iphdr);
e4e3c02a
VZ
385
386
387	/* Check if there was a TCP timestamp; if there was, it will
388	 * always be 12 bytes long: nop nop kind length echo val.
389 *
390 * Otherwise FW would close the aggregation.
391 */
392 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
393 hdrs_len += TPA_TSTAMP_OPT_LEN;
394
395 return len_on_bd - hdrs_len;
396}
397
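/* Editor's note: a stand-alone sketch of the MSS arithmetic in
 * bnx2x_set_lro_mss() above, with the header sizes written out as plain
 * numbers (assumed values, not taken from this file): Ethernet 14, IPv4 20,
 * IPv6 40, TCP 20, and the 12-byte TCP timestamp option. Illustrative only;
 * the helper name is made up.
 */
static u16 lro_mss_example(u16 len_on_bd, bool is_ipv6, bool has_tstamp)
{
	u16 hdrs_len = 14 + 20;		/* ETH_HLEN + sizeof(struct tcphdr) */

	hdrs_len += is_ipv6 ? 40 : 20;	/* ipv6hdr vs. iphdr */
	if (has_tstamp)
		hdrs_len += 12;		/* nop nop kind length echo val */

	/* e.g. an IPv4 + timestamp aggregation with len_on_bd = 1514
	 * gives 1514 - (14 + 20 + 20 + 12) = 1448.
	 */
	return len_on_bd - hdrs_len;
}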
9f6c9258 398static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
621b4d66
DK
399 struct bnx2x_agg_info *tpa_info,
400 u16 pages,
401 struct sk_buff *skb,
619c5cb6
VZ
402 struct eth_end_agg_rx_cqe *cqe,
403 u16 cqe_idx)
9f6c9258
DK
404{
405 struct sw_rx_page *rx_pg, old_rx_pg;
621b4d66
DK
406 u32 i, frag_len, frag_size;
407 int err, j, frag_id = 0;
619c5cb6 408 u16 len_on_bd = tpa_info->len_on_bd;
621b4d66 409 u16 full_page = 0, gro_size = 0;
9f6c9258 410
619c5cb6 411 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
621b4d66
DK
412
413 if (fp->mode == TPA_MODE_GRO) {
414 gro_size = tpa_info->gro_size;
415 full_page = tpa_info->full_page;
416 }
9f6c9258
DK
417
418 /* This is needed in order to enable forwarding support */
621b4d66 419 if (frag_size) {
619c5cb6
VZ
420 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
421 tpa_info->parsing_flags, len_on_bd);
9f6c9258 422
621b4d66
DK
423 /* set for GRO */
424 if (fp->mode == TPA_MODE_GRO)
425 skb_shinfo(skb)->gso_type =
426 (GET_FLAG(tpa_info->parsing_flags,
427 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
428 PRS_FLAG_OVERETH_IPV6) ?
429 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
430 }
431
432
9f6c9258
DK
433#ifdef BNX2X_STOP_ON_ERROR
434 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
435 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
436 pages, cqe_idx);
619c5cb6 437 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
438 bnx2x_panic();
439 return -EINVAL;
440 }
441#endif
442
443 /* Run through the SGL and compose the fragmented skb */
444 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 445 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
446
447 /* FW gives the indices of the SGE as if the ring is an array
448 (meaning that "next" element will consume 2 indices) */
621b4d66
DK
449 if (fp->mode == TPA_MODE_GRO)
450 frag_len = min_t(u32, frag_size, (u32)full_page);
451 else /* LRO */
452 frag_len = min_t(u32, frag_size,
453 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
454
9f6c9258
DK
455 rx_pg = &fp->rx_page_ring[sge_idx];
456 old_rx_pg = *rx_pg;
457
458 /* If we fail to allocate a substitute page, we simply stop
459 where we are and drop the whole packet */
460 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
461 if (unlikely(err)) {
462 fp->eth_q_stats.rx_skb_alloc_failed++;
463 return err;
464 }
465
466		/* Unmap the page as we are going to pass it to the stack */
467 dma_unmap_page(&bp->pdev->dev,
468 dma_unmap_addr(&old_rx_pg, mapping),
469 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
9f6c9258 470 /* Add one frag and update the appropriate fields in the skb */
621b4d66
DK
471 if (fp->mode == TPA_MODE_LRO)
472 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
473 else { /* GRO */
474 int rem;
475 int offset = 0;
476 for (rem = frag_len; rem > 0; rem -= gro_size) {
477 int len = rem > gro_size ? gro_size : rem;
478 skb_fill_page_desc(skb, frag_id++,
479 old_rx_pg.page, offset, len);
480 if (offset)
481 get_page(old_rx_pg.page);
482 offset += len;
483 }
484 }
9f6c9258
DK
485
486 skb->data_len += frag_len;
e1ac50f6 487 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
9f6c9258
DK
488 skb->len += frag_len;
489
490 frag_size -= frag_len;
491 }
492
493 return 0;
494}
495
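/* Editor's note: a hedged illustration of the GRO splitting loop in
 * bnx2x_fill_frag_skb() above; the helper name and the numbers are made up.
 * One SGE chunk is cut into gro_size-sized page descriptors, e.g.
 * frag_len = 7300 with gro_size = 1500 yields descriptors of
 * 1500, 1500, 1500, 1500 and 1300 bytes.
 */
static int gro_split_example(int frag_len, int gro_size)
{
	int rem, nr_descs = 0;

	for (rem = frag_len; rem > 0; rem -= gro_size)
		nr_descs++;	/* each descriptor carries min(rem, gro_size) */

	return nr_descs;	/* 5 for the example above */
}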
621b4d66
DK
496static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
497 struct bnx2x_agg_info *tpa_info,
498 u16 pages,
499 struct eth_end_agg_rx_cqe *cqe,
500 u16 cqe_idx)
9f6c9258 501{
619c5cb6 502 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
621b4d66 503 u8 pad = tpa_info->placement_offset;
619c5cb6 504 u16 len = tpa_info->len_on_bd;
e52fcb24 505 struct sk_buff *skb = NULL;
621b4d66 506 u8 *new_data, *data = rx_buf->data;
619c5cb6
VZ
507 u8 old_tpa_state = tpa_info->tpa_state;
508
509 tpa_info->tpa_state = BNX2X_TPA_STOP;
510
511	/* If there was an error during the handling of the TPA_START -
512 * drop this aggregation.
513 */
514 if (old_tpa_state == BNX2X_TPA_ERROR)
515 goto drop;
516
e52fcb24
ED
517 /* Try to allocate the new data */
518 new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
9f6c9258
DK
519
520 /* Unmap skb in the pool anyway, as we are going to change
521 pool entry status to BNX2X_TPA_STOP even if new skb allocation
522 fails. */
523 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 524 fp->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24
ED
525 if (likely(new_data))
526 skb = build_skb(data);
9f6c9258 527
e52fcb24 528 if (likely(skb)) {
9f6c9258 529#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 530 if (pad + len > fp->rx_buf_size) {
51c1a580 531 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
a8c94b91 532 pad, len, fp->rx_buf_size);
9f6c9258
DK
533 bnx2x_panic();
534 return;
535 }
536#endif
537
e52fcb24 538 skb_reserve(skb, pad + NET_SKB_PAD);
9f6c9258 539 skb_put(skb, len);
e52fcb24 540 skb->rxhash = tpa_info->rxhash;
9f6c9258
DK
541
542 skb->protocol = eth_type_trans(skb, bp->dev);
543 skb->ip_summed = CHECKSUM_UNNECESSARY;
544
621b4d66
DK
545 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
546 skb, cqe, cqe_idx)) {
619c5cb6
VZ
547 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
548 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
9bcc0893 549 napi_gro_receive(&fp->napi, skb);
9f6c9258 550 } else {
51c1a580
MS
551 DP(NETIF_MSG_RX_STATUS,
552 "Failed to allocate new pages - dropping packet!\n");
40955532 553 dev_kfree_skb_any(skb);
9f6c9258
DK
554 }
555
556
e52fcb24
ED
557 /* put new data in bin */
558 rx_buf->data = new_data;
9f6c9258 559
619c5cb6 560 return;
9f6c9258 561 }
3f61cd87 562 kfree(new_data);
619c5cb6
VZ
563drop:
564 /* drop the packet and keep the buffer in the bin */
565 DP(NETIF_MSG_RX_STATUS,
566 "Failed to allocate or map a new skb - dropping packet!\n");
567 fp->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
568}
569
9f6c9258
DK
570
571int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
572{
573 struct bnx2x *bp = fp->bp;
574 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
575 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
576 int rx_pkt = 0;
577
578#ifdef BNX2X_STOP_ON_ERROR
579 if (unlikely(bp->panic))
580 return 0;
581#endif
582
583 /* CQ "next element" is of the size of the regular element,
584 that's why it's ok here */
585 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
586 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
587 hw_comp_cons++;
588
589 bd_cons = fp->rx_bd_cons;
590 bd_prod = fp->rx_bd_prod;
591 bd_prod_fw = bd_prod;
592 sw_comp_cons = fp->rx_comp_cons;
593 sw_comp_prod = fp->rx_comp_prod;
594
595 /* Memory barrier necessary as speculative reads of the rx
596 * buffer can be ahead of the index in the status block
597 */
598 rmb();
599
600 DP(NETIF_MSG_RX_STATUS,
601 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
602 fp->index, hw_comp_cons, sw_comp_cons);
603
604 while (sw_comp_cons != hw_comp_cons) {
605 struct sw_rx_bd *rx_buf = NULL;
606 struct sk_buff *skb;
607 union eth_rx_cqe *cqe;
619c5cb6 608 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258 609 u8 cqe_fp_flags;
619c5cb6 610 enum eth_rx_cqe_type cqe_fp_type;
621b4d66 611 u16 len, pad, queue;
e52fcb24 612 u8 *data;
9f6c9258 613
619c5cb6
VZ
614#ifdef BNX2X_STOP_ON_ERROR
615 if (unlikely(bp->panic))
616 return 0;
617#endif
618
9f6c9258
DK
619 comp_ring_cons = RCQ_BD(sw_comp_cons);
620 bd_prod = RX_BD(bd_prod);
621 bd_cons = RX_BD(bd_cons);
622
9f6c9258 623 cqe = &fp->rx_comp_ring[comp_ring_cons];
619c5cb6
VZ
624 cqe_fp = &cqe->fast_path_cqe;
625 cqe_fp_flags = cqe_fp->type_error_flags;
626 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258 627
51c1a580
MS
628 DP(NETIF_MSG_RX_STATUS,
629 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
630 CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
631 cqe_fp_flags, cqe_fp->status_flags,
632 le32_to_cpu(cqe_fp->rss_hash_result),
621b4d66
DK
633 le16_to_cpu(cqe_fp->vlan_tag),
634 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
9f6c9258
DK
635
636 /* is this a slowpath msg? */
619c5cb6 637 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
638 bnx2x_sp_event(fp, cqe);
639 goto next_cqe;
e52fcb24 640 }
621b4d66 641
e52fcb24
ED
642 rx_buf = &fp->rx_buf_ring[bd_cons];
643 data = rx_buf->data;
9f6c9258 644
e52fcb24 645 if (!CQE_TYPE_FAST(cqe_fp_type)) {
621b4d66
DK
646 struct bnx2x_agg_info *tpa_info;
647 u16 frag_size, pages;
619c5cb6 648#ifdef BNX2X_STOP_ON_ERROR
e52fcb24
ED
649 /* sanity check */
650 if (fp->disable_tpa &&
651 (CQE_TYPE_START(cqe_fp_type) ||
652 CQE_TYPE_STOP(cqe_fp_type)))
51c1a580 653 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
e52fcb24 654 CQE_TYPE(cqe_fp_type));
619c5cb6 655#endif
9f6c9258 656
e52fcb24
ED
657 if (CQE_TYPE_START(cqe_fp_type)) {
658 u16 queue = cqe_fp->queue_index;
659 DP(NETIF_MSG_RX_STATUS,
660 "calling tpa_start on queue %d\n",
661 queue);
9f6c9258 662
e52fcb24
ED
663 bnx2x_tpa_start(fp, queue,
664 bd_cons, bd_prod,
665 cqe_fp);
621b4d66 666
e52fcb24 667 goto next_rx;
e52fcb24 668
621b4d66
DK
669 }
670 queue = cqe->end_agg_cqe.queue_index;
671 tpa_info = &fp->tpa_info[queue];
672 DP(NETIF_MSG_RX_STATUS,
673 "calling tpa_stop on queue %d\n",
674 queue);
675
676 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
677 tpa_info->len_on_bd;
678
679 if (fp->mode == TPA_MODE_GRO)
680 pages = (frag_size + tpa_info->full_page - 1) /
681 tpa_info->full_page;
682 else
683 pages = SGE_PAGE_ALIGN(frag_size) >>
684 SGE_PAGE_SHIFT;
685
686 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
687 &cqe->end_agg_cqe, comp_ring_cons);
9f6c9258 688#ifdef BNX2X_STOP_ON_ERROR
621b4d66
DK
689 if (bp->panic)
690 return 0;
9f6c9258
DK
691#endif
692
621b4d66
DK
693 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
694 goto next_cqe;
e52fcb24
ED
695 }
696 /* non TPA */
621b4d66 697 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
e52fcb24
ED
698 pad = cqe_fp->placement_offset;
699 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 700 dma_unmap_addr(rx_buf, mapping),
e52fcb24
ED
701 pad + RX_COPY_THRESH,
702 DMA_FROM_DEVICE);
703 pad += NET_SKB_PAD;
704		prefetch(data + pad); /* speed up eth_type_trans() */
705 /* is this an error packet? */
706 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
51c1a580 707 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
708 "ERROR flags %x rx packet %u\n",
709 cqe_fp_flags, sw_comp_cons);
710 fp->eth_q_stats.rx_err_discard_pkt++;
711 goto reuse_rx;
712 }
9f6c9258 713
e52fcb24
ED
714 /* Since we don't have a jumbo ring
715 * copy small packets if mtu > 1500
716 */
717 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
718 (len <= RX_COPY_THRESH)) {
719 skb = netdev_alloc_skb_ip_align(bp->dev, len);
720 if (skb == NULL) {
51c1a580 721 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
e52fcb24
ED
722 "ERROR packet dropped because of alloc failure\n");
723 fp->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
724 goto reuse_rx;
725 }
e52fcb24
ED
726 memcpy(skb->data, data + pad, len);
727 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
728 } else {
729 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
9f6c9258 730 dma_unmap_single(&bp->pdev->dev,
e52fcb24 731 dma_unmap_addr(rx_buf, mapping),
a8c94b91 732 fp->rx_buf_size,
9f6c9258 733 DMA_FROM_DEVICE);
e52fcb24
ED
734 skb = build_skb(data);
735 if (unlikely(!skb)) {
736 kfree(data);
737 fp->eth_q_stats.rx_skb_alloc_failed++;
738 goto next_rx;
739 }
9f6c9258 740 skb_reserve(skb, pad);
9f6c9258 741 } else {
51c1a580
MS
742 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
743 "ERROR packet dropped because of alloc failure\n");
9f6c9258
DK
744 fp->eth_q_stats.rx_skb_alloc_failed++;
745reuse_rx:
e52fcb24 746 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
9f6c9258
DK
747 goto next_rx;
748 }
036d2df9 749 }
9f6c9258 750
036d2df9
DK
751 skb_put(skb, len);
752 skb->protocol = eth_type_trans(skb, bp->dev);
9f6c9258 753
036d2df9
DK
754		/* Set Toeplitz hash for a non-LRO skb */
755 skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
9f6c9258 756
036d2df9 757 skb_checksum_none_assert(skb);
f85582f8 758
036d2df9 759 if (bp->dev->features & NETIF_F_RXCSUM) {
619c5cb6 760
036d2df9
DK
761 if (likely(BNX2X_RX_CSUM_OK(cqe)))
762 skb->ip_summed = CHECKSUM_UNNECESSARY;
763 else
764 fp->eth_q_stats.hw_csum_err++;
9f6c9258
DK
765 }
766
f233cafe 767 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 768
619c5cb6
VZ
769 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
770 PARSING_FLAGS_VLAN)
9bcc0893 771 __vlan_hwaccel_put_tag(skb,
619c5cb6 772 le16_to_cpu(cqe_fp->vlan_tag));
9bcc0893 773 napi_gro_receive(&fp->napi, skb);
9f6c9258
DK
774
775
776next_rx:
e52fcb24 777 rx_buf->data = NULL;
9f6c9258
DK
778
779 bd_cons = NEXT_RX_IDX(bd_cons);
780 bd_prod = NEXT_RX_IDX(bd_prod);
781 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
782 rx_pkt++;
783next_cqe:
784 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
785 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
786
787 if (rx_pkt == budget)
788 break;
789 } /* while */
790
791 fp->rx_bd_cons = bd_cons;
792 fp->rx_bd_prod = bd_prod_fw;
793 fp->rx_comp_cons = sw_comp_cons;
794 fp->rx_comp_prod = sw_comp_prod;
795
796 /* Update producers */
797 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
798 fp->rx_sge_prod);
799
800 fp->rx_pkt += rx_pkt;
801 fp->rx_calls++;
802
803 return rx_pkt;
804}
805
806static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
807{
808 struct bnx2x_fastpath *fp = fp_cookie;
809 struct bnx2x *bp = fp->bp;
6383c0b3 810 u8 cos;
9f6c9258 811
51c1a580
MS
812 DP(NETIF_MSG_INTR,
813 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
523224a3
DK
814 fp->index, fp->fw_sb_id, fp->igu_sb_id);
815 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
816
817#ifdef BNX2X_STOP_ON_ERROR
818 if (unlikely(bp->panic))
819 return IRQ_HANDLED;
820#endif
821
822 /* Handle Rx and Tx according to MSI-X vector */
823 prefetch(fp->rx_cons_sb);
6383c0b3
AE
824
825 for_each_cos_in_tx_queue(fp, cos)
826 prefetch(fp->txdata[cos].tx_cons_sb);
827
523224a3 828 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
829 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
830
831 return IRQ_HANDLED;
832}
833
9f6c9258
DK
834/* HW Lock for shared dual port PHYs */
835void bnx2x_acquire_phy_lock(struct bnx2x *bp)
836{
837 mutex_lock(&bp->port.phy_mutex);
838
839 if (bp->port.need_hw_lock)
840 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
841}
842
843void bnx2x_release_phy_lock(struct bnx2x *bp)
844{
845 if (bp->port.need_hw_lock)
846 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
847
848 mutex_unlock(&bp->port.phy_mutex);
849}
850
0793f83f
DK
851/* calculates MF speed according to the current line speed and MF configuration */
852u16 bnx2x_get_mf_speed(struct bnx2x *bp)
853{
854 u16 line_speed = bp->link_vars.line_speed;
855 if (IS_MF(bp)) {
faa6fcbb
DK
856 u16 maxCfg = bnx2x_extract_max_cfg(bp,
857 bp->mf_config[BP_VN(bp)]);
858
859 /* Calculate the current MAX line speed limit for the MF
860 * devices
0793f83f 861 */
faa6fcbb
DK
862 if (IS_MF_SI(bp))
863 line_speed = (line_speed * maxCfg) / 100;
864 else { /* SD mode */
0793f83f
DK
865 u16 vn_max_rate = maxCfg * 100;
866
867 if (vn_max_rate < line_speed)
868 line_speed = vn_max_rate;
faa6fcbb 869 }
0793f83f
DK
870 }
871
872 return line_speed;
873}
874
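/* Editor's note: an illustrative sketch of the clamping done by
 * bnx2x_get_mf_speed() above. It assumes maxCfg is interpreted as a
 * percentage in SI mode and as units of 100 Mbps in SD mode, which is what
 * the arithmetic suggests; the helper name and input numbers are made up.
 */
static u16 mf_speed_example(u16 line_speed, u16 max_cfg, bool is_si)
{
	if (is_si)		/* e.g. 10000 * 25 / 100 = 2500 Mbps */
		return (line_speed * max_cfg) / 100;

	/* SD mode: clamp to max_cfg * 100, i.e. min(10000, 2500) = 2500 Mbps */
	return min_t(u16, line_speed, max_cfg * 100);
}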
2ae17f66
VZ
875/**
876 * bnx2x_fill_report_data - fill link report data to report
877 *
878 * @bp: driver handle
879 * @data: link state to update
880 *
881 * It uses non-atomic bit operations because it is called under the mutex.
882 */
883static inline void bnx2x_fill_report_data(struct bnx2x *bp,
884 struct bnx2x_link_report_data *data)
885{
886 u16 line_speed = bnx2x_get_mf_speed(bp);
887
888 memset(data, 0, sizeof(*data));
889
890	/* Fill the report data: effective line speed */
891 data->line_speed = line_speed;
892
893 /* Link is down */
894 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
895 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
896 &data->link_report_flags);
897
898 /* Full DUPLEX */
899 if (bp->link_vars.duplex == DUPLEX_FULL)
900 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
901
902 /* Rx Flow Control is ON */
903 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
904 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
905
906 /* Tx Flow Control is ON */
907 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
908 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
909}
910
911/**
912 * bnx2x_link_report - report link status to OS.
913 *
914 * @bp: driver handle
915 *
916 * Calls the __bnx2x_link_report() under the same locking scheme
917 * as a link/PHY state managing code to ensure a consistent link
918 * reporting.
919 */
920
9f6c9258
DK
921void bnx2x_link_report(struct bnx2x *bp)
922{
2ae17f66
VZ
923 bnx2x_acquire_phy_lock(bp);
924 __bnx2x_link_report(bp);
925 bnx2x_release_phy_lock(bp);
926}
9f6c9258 927
2ae17f66
VZ
928/**
929 * __bnx2x_link_report - report link status to OS.
930 *
931 * @bp: driver handle
932 *
933 * Non-atomic implementation.
934 * Should be called under the phy_lock.
935 */
936void __bnx2x_link_report(struct bnx2x *bp)
937{
938 struct bnx2x_link_report_data cur_data;
9f6c9258 939
2ae17f66
VZ
940 /* reread mf_cfg */
941 if (!CHIP_IS_E1(bp))
942 bnx2x_read_mf_cfg(bp);
943
944 /* Read the current link report info */
945 bnx2x_fill_report_data(bp, &cur_data);
946
947 /* Don't report link down or exactly the same link status twice */
948 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
949 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
950 &bp->last_reported_link.link_report_flags) &&
951 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
952 &cur_data.link_report_flags)))
953 return;
954
955 bp->link_cnt++;
9f6c9258 956
2ae17f66
VZ
957	/* We are going to report new link parameters now -
958 * remember the current data for the next time.
959 */
960 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 961
2ae17f66
VZ
962 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
963 &cur_data.link_report_flags)) {
964 netif_carrier_off(bp->dev);
965 netdev_err(bp->dev, "NIC Link is Down\n");
966 return;
967 } else {
94f05b0f
JP
968 const char *duplex;
969 const char *flow;
970
2ae17f66 971 netif_carrier_on(bp->dev);
9f6c9258 972
2ae17f66
VZ
973 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
974 &cur_data.link_report_flags))
94f05b0f 975 duplex = "full";
9f6c9258 976 else
94f05b0f 977 duplex = "half";
9f6c9258 978
2ae17f66
VZ
979 /* Handle the FC at the end so that only these flags would be
980 * possibly set. This way we may easily check if there is no FC
981 * enabled.
982 */
983 if (cur_data.link_report_flags) {
984 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
985 &cur_data.link_report_flags)) {
2ae17f66
VZ
986 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
987 &cur_data.link_report_flags))
94f05b0f
JP
988 flow = "ON - receive & transmit";
989 else
990 flow = "ON - receive";
9f6c9258 991 } else {
94f05b0f 992 flow = "ON - transmit";
9f6c9258 993 }
94f05b0f
JP
994 } else {
995 flow = "none";
9f6c9258 996 }
94f05b0f
JP
997 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
998 cur_data.line_speed, duplex, flow);
9f6c9258
DK
999 }
1000}
1001
1002void bnx2x_init_rx_rings(struct bnx2x *bp)
1003{
1004 int func = BP_FUNC(bp);
523224a3 1005 u16 ring_prod;
9f6c9258 1006 int i, j;
25141580 1007
b3b83c3f 1008 /* Allocate TPA resources */
ec6ba945 1009 for_each_rx_queue(bp, j) {
523224a3 1010 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1011
a8c94b91
VZ
1012 DP(NETIF_MSG_IFUP,
1013 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1014
523224a3 1015 if (!fp->disable_tpa) {
619c5cb6 1016			/* Fill the per-aggregation pool */
dfacf138 1017 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1018 struct bnx2x_agg_info *tpa_info =
1019 &fp->tpa_info[i];
1020 struct sw_rx_bd *first_buf =
1021 &tpa_info->first_buf;
1022
e52fcb24
ED
1023 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
1024 GFP_ATOMIC);
1025 if (!first_buf->data) {
51c1a580
MS
1026 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1027 j);
9f6c9258
DK
1028 bnx2x_free_tpa_pool(bp, fp, i);
1029 fp->disable_tpa = 1;
1030 break;
1031 }
619c5cb6
VZ
1032 dma_unmap_addr_set(first_buf, mapping, 0);
1033 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1034 }
523224a3
DK
1035
1036 /* "next page" elements initialization */
1037 bnx2x_set_next_page_sgl(fp);
1038
1039 /* set SGEs bit mask */
1040 bnx2x_init_sge_ring_bit_mask(fp);
1041
1042 /* Allocate SGEs and initialize the ring elements */
1043 for (i = 0, ring_prod = 0;
1044 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1045
1046 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
51c1a580
MS
1047 BNX2X_ERR("was only able to allocate %d rx sges\n",
1048 i);
1049 BNX2X_ERR("disabling TPA for queue[%d]\n",
1050 j);
523224a3 1051 /* Cleanup already allocated elements */
619c5cb6
VZ
1052 bnx2x_free_rx_sge_range(bp, fp,
1053 ring_prod);
1054 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1055 MAX_AGG_QS(bp));
523224a3
DK
1056 fp->disable_tpa = 1;
1057 ring_prod = 0;
1058 break;
1059 }
1060 ring_prod = NEXT_SGE_IDX(ring_prod);
1061 }
1062
1063 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1064 }
1065 }
1066
ec6ba945 1067 for_each_rx_queue(bp, j) {
9f6c9258
DK
1068 struct bnx2x_fastpath *fp = &bp->fp[j];
1069
1070 fp->rx_bd_cons = 0;
9f6c9258 1071
b3b83c3f
DK
1072 /* Activate BD ring */
1073 /* Warning!
1074 * this will generate an interrupt (to the TSTORM)
1075 * must only be done after chip is initialized
1076 */
1077 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1078 fp->rx_sge_prod);
9f6c9258 1079
9f6c9258
DK
1080 if (j != 0)
1081 continue;
1082
619c5cb6 1083 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1084 REG_WR(bp, BAR_USTRORM_INTMEM +
1085 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1086 U64_LO(fp->rx_comp_mapping));
1087 REG_WR(bp, BAR_USTRORM_INTMEM +
1088 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1089 U64_HI(fp->rx_comp_mapping));
1090 }
9f6c9258
DK
1091 }
1092}
f85582f8 1093
9f6c9258
DK
1094static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1095{
1096 int i;
6383c0b3 1097 u8 cos;
9f6c9258 1098
ec6ba945 1099 for_each_tx_queue(bp, i) {
9f6c9258 1100 struct bnx2x_fastpath *fp = &bp->fp[i];
6383c0b3
AE
1101 for_each_cos_in_tx_queue(fp, cos) {
1102 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
2df1a70a 1103 unsigned pkts_compl = 0, bytes_compl = 0;
9f6c9258 1104
6383c0b3
AE
1105 u16 sw_prod = txdata->tx_pkt_prod;
1106 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1107
6383c0b3 1108 while (sw_cons != sw_prod) {
2df1a70a
TH
1109 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1110 &pkts_compl, &bytes_compl);
6383c0b3
AE
1111 sw_cons++;
1112 }
2df1a70a
TH
1113 netdev_tx_reset_queue(
1114 netdev_get_tx_queue(bp->dev, txdata->txq_index));
9f6c9258
DK
1115 }
1116 }
1117}
1118
b3b83c3f
DK
1119static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1120{
1121 struct bnx2x *bp = fp->bp;
1122 int i;
1123
1124 /* ring wasn't allocated */
1125 if (fp->rx_buf_ring == NULL)
1126 return;
1127
1128 for (i = 0; i < NUM_RX_BD; i++) {
1129 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
e52fcb24 1130 u8 *data = rx_buf->data;
b3b83c3f 1131
e52fcb24 1132 if (data == NULL)
b3b83c3f 1133 continue;
b3b83c3f
DK
1134 dma_unmap_single(&bp->pdev->dev,
1135 dma_unmap_addr(rx_buf, mapping),
1136 fp->rx_buf_size, DMA_FROM_DEVICE);
1137
e52fcb24
ED
1138 rx_buf->data = NULL;
1139 kfree(data);
b3b83c3f
DK
1140 }
1141}
1142
9f6c9258
DK
1143static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1144{
b3b83c3f 1145 int j;
9f6c9258 1146
ec6ba945 1147 for_each_rx_queue(bp, j) {
9f6c9258
DK
1148 struct bnx2x_fastpath *fp = &bp->fp[j];
1149
b3b83c3f 1150 bnx2x_free_rx_bds(fp);
9f6c9258 1151
9f6c9258 1152 if (!fp->disable_tpa)
dfacf138 1153 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1154 }
1155}
1156
1157void bnx2x_free_skbs(struct bnx2x *bp)
1158{
1159 bnx2x_free_tx_skbs(bp);
1160 bnx2x_free_rx_skbs(bp);
1161}
1162
e3835b99
DK
1163void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1164{
1165 /* load old values */
1166 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1167
1168 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1169 /* leave all but MAX value */
1170 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1171
1172 /* set new MAX value */
1173 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1174 & FUNC_MF_CFG_MAX_BW_MASK;
1175
1176 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1177 }
1178}
1179
ca92429f
DK
1180/**
1181 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1182 *
1183 * @bp: driver handle
1184 * @nvecs: number of vectors to be released
1185 */
1186static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1187{
ca92429f 1188 int i, offset = 0;
9f6c9258 1189
ca92429f
DK
1190 if (nvecs == offset)
1191 return;
1192 free_irq(bp->msix_table[offset].vector, bp->dev);
9f6c9258 1193 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
ca92429f
DK
1194 bp->msix_table[offset].vector);
1195 offset++;
9f6c9258 1196#ifdef BCM_CNIC
ca92429f
DK
1197 if (nvecs == offset)
1198 return;
9f6c9258
DK
1199 offset++;
1200#endif
ca92429f 1201
ec6ba945 1202 for_each_eth_queue(bp, i) {
ca92429f
DK
1203 if (nvecs == offset)
1204 return;
51c1a580
MS
1205 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1206 i, bp->msix_table[offset].vector);
9f6c9258 1207
ca92429f 1208 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1209 }
1210}
1211
d6214d7a 1212void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1213{
d6214d7a 1214 if (bp->flags & USING_MSIX_FLAG)
ca92429f 1215 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
6383c0b3 1216 CNIC_PRESENT + 1);
d6214d7a
DK
1217 else if (bp->flags & USING_MSI_FLAG)
1218 free_irq(bp->pdev->irq, bp->dev);
1219 else
9f6c9258
DK
1220 free_irq(bp->pdev->irq, bp->dev);
1221}
1222
d6214d7a 1223int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1224{
d6214d7a 1225 int msix_vec = 0, i, rc, req_cnt;
9f6c9258 1226
d6214d7a 1227 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580 1228 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
d6214d7a
DK
1229 bp->msix_table[0].entry);
1230 msix_vec++;
9f6c9258
DK
1231
1232#ifdef BCM_CNIC
d6214d7a 1233 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580 1234 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
d6214d7a
DK
1235 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1236 msix_vec++;
9f6c9258 1237#endif
6383c0b3 1238 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1239 for_each_eth_queue(bp, i) {
d6214d7a 1240 bp->msix_table[msix_vec].entry = msix_vec;
51c1a580
MS
1241 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1242 msix_vec, msix_vec, i);
d6214d7a 1243 msix_vec++;
9f6c9258
DK
1244 }
1245
6383c0b3 1246 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
d6214d7a
DK
1247
1248 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
9f6c9258
DK
1249
1250 /*
1251 * reconfigure number of tx/rx queues according to available
1252 * MSI-X vectors
1253 */
1254 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
d6214d7a
DK
1255		/* how many fewer vectors will we have? */
1256 int diff = req_cnt - rc;
9f6c9258 1257
51c1a580 1258 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
9f6c9258
DK
1259
1260 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1261
1262 if (rc) {
51c1a580 1263 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
9f6c9258
DK
1264 return rc;
1265 }
d6214d7a
DK
1266 /*
1267 * decrease number of queues by number of unallocated entries
1268 */
1269 bp->num_queues -= diff;
9f6c9258 1270
51c1a580 1271 BNX2X_DEV_INFO("New queue configuration set: %d\n",
9f6c9258
DK
1272 bp->num_queues);
1273 } else if (rc) {
d6214d7a
DK
1274		/* fall back to INTx if not enough memory */
1275 if (rc == -ENOMEM)
1276 bp->flags |= DISABLE_MSI_FLAG;
51c1a580 1277 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
9f6c9258
DK
1278 return rc;
1279 }
1280
1281 bp->flags |= USING_MSIX_FLAG;
1282
1283 return 0;
1284}
1285
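/* Editor's note: illustrative MSI-X accounting matching bnx2x_enable_msix()
 * above; the helper name and the queue counts are made up. With 8 ETH
 * queues, one CNIC vector and one slowpath vector the driver first requests
 * 8 + 1 + 1 = 10 vectors; if only 7 are granted it retries with 7 and
 * shrinks the queue count by 10 - 7 = 3.
 */
static int msix_shrink_example(int num_eth_queues, int granted)
{
	int req_cnt = num_eth_queues + 1 /* CNIC */ + 1 /* slowpath */;
	int diff = req_cnt - granted;

	return num_eth_queues - diff;	/* 8 - 3 = 5 usable ETH queues */
}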
1286static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1287{
ca92429f 1288 int i, rc, offset = 0;
9f6c9258 1289
ca92429f
DK
1290 rc = request_irq(bp->msix_table[offset++].vector,
1291 bnx2x_msix_sp_int, 0,
9f6c9258
DK
1292 bp->dev->name, bp->dev);
1293 if (rc) {
1294 BNX2X_ERR("request sp irq failed\n");
1295 return -EBUSY;
1296 }
1297
1298#ifdef BCM_CNIC
1299 offset++;
1300#endif
ec6ba945 1301 for_each_eth_queue(bp, i) {
9f6c9258
DK
1302 struct bnx2x_fastpath *fp = &bp->fp[i];
1303 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1304 bp->dev->name, i);
1305
d6214d7a 1306 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1307 bnx2x_msix_fp_int, 0, fp->name, fp);
1308 if (rc) {
ca92429f
DK
1309 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1310 bp->msix_table[offset].vector, rc);
1311 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1312 return -EBUSY;
1313 }
1314
d6214d7a 1315 offset++;
9f6c9258
DK
1316 }
1317
ec6ba945 1318 i = BNX2X_NUM_ETH_QUEUES(bp);
6383c0b3 1319 offset = 1 + CNIC_PRESENT;
51c1a580 1320 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
9f6c9258
DK
1321 bp->msix_table[0].vector,
1322 0, bp->msix_table[offset].vector,
1323 i - 1, bp->msix_table[offset + i - 1].vector);
1324
1325 return 0;
1326}
1327
d6214d7a 1328int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1329{
1330 int rc;
1331
1332 rc = pci_enable_msi(bp->pdev);
1333 if (rc) {
51c1a580 1334 BNX2X_DEV_INFO("MSI is not attainable\n");
9f6c9258
DK
1335 return -1;
1336 }
1337 bp->flags |= USING_MSI_FLAG;
1338
1339 return 0;
1340}
1341
1342static int bnx2x_req_irq(struct bnx2x *bp)
1343{
1344 unsigned long flags;
1345 int rc;
1346
1347 if (bp->flags & USING_MSI_FLAG)
1348 flags = 0;
1349 else
1350 flags = IRQF_SHARED;
1351
1352 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1353 bp->dev->name, bp->dev);
9f6c9258
DK
1354 return rc;
1355}
1356
619c5cb6
VZ
1357static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1358{
1359 int rc = 0;
1360 if (bp->flags & USING_MSIX_FLAG) {
1361 rc = bnx2x_req_msix_irqs(bp);
1362 if (rc)
1363 return rc;
1364 } else {
1365 bnx2x_ack_int(bp);
1366 rc = bnx2x_req_irq(bp);
1367 if (rc) {
1368 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1369 return rc;
1370 }
1371 if (bp->flags & USING_MSI_FLAG) {
1372 bp->dev->irq = bp->pdev->irq;
1373 netdev_info(bp->dev, "using MSI IRQ %d\n",
1374 bp->pdev->irq);
1375 }
1376 }
1377
1378 return 0;
1379}
1380
1381static inline void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1382{
1383 int i;
1384
619c5cb6 1385 for_each_rx_queue(bp, i)
9f6c9258
DK
1386 napi_enable(&bnx2x_fp(bp, i, napi));
1387}
1388
619c5cb6 1389static inline void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1390{
1391 int i;
1392
619c5cb6 1393 for_each_rx_queue(bp, i)
9f6c9258
DK
1394 napi_disable(&bnx2x_fp(bp, i, napi));
1395}
1396
1397void bnx2x_netif_start(struct bnx2x *bp)
1398{
4b7ed897
DK
1399 if (netif_running(bp->dev)) {
1400 bnx2x_napi_enable(bp);
1401 bnx2x_int_enable(bp);
1402 if (bp->state == BNX2X_STATE_OPEN)
1403 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1404 }
1405}
1406
1407void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1408{
1409 bnx2x_int_disable_sync(bp, disable_hw);
1410 bnx2x_napi_disable(bp);
9f6c9258 1411}
9f6c9258 1412
8307fa3e
VZ
1413u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1414{
8307fa3e 1415 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1416
faa28314 1417#ifdef BCM_CNIC
cdb9d6ae 1418 if (!NO_FCOE(bp)) {
8307fa3e
VZ
1419 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1420 u16 ether_type = ntohs(hdr->h_proto);
1421
1422 /* Skip VLAN tag if present */
1423 if (ether_type == ETH_P_8021Q) {
1424 struct vlan_ethhdr *vhdr =
1425 (struct vlan_ethhdr *)skb->data;
1426
1427 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1428 }
1429
1430 /* If ethertype is FCoE or FIP - use FCoE ring */
1431 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1432 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e
VZ
1433 }
1434#endif
cdb9d6ae 1435 /* select a non-FCoE queue */
6383c0b3 1436 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
8307fa3e
VZ
1437}
1438
d6214d7a
DK
1439void bnx2x_set_num_queues(struct bnx2x *bp)
1440{
1441 switch (bp->multi_mode) {
1442 case ETH_RSS_MODE_DISABLED:
9f6c9258 1443 bp->num_queues = 1;
d6214d7a
DK
1444 break;
1445 case ETH_RSS_MODE_REGULAR:
1446 bp->num_queues = bnx2x_calc_num_queues(bp);
9f6c9258 1447 break;
f85582f8 1448
9f6c9258 1449 default:
d6214d7a 1450 bp->num_queues = 1;
9f6c9258
DK
1451 break;
1452 }
ec6ba945 1453
614c76df 1454#ifdef BCM_CNIC
9e62e912
DK
1455 /* override in STORAGE SD mode */
1456 if (IS_MF_STORAGE_SD(bp))
614c76df
DK
1457 bp->num_queues = 1;
1458#endif
ec6ba945 1459 /* Add special queues */
6383c0b3 1460 bp->num_queues += NON_ETH_CONTEXT_USE;
ec6ba945
VZ
1461}
1462
cdb9d6ae
VZ
1463/**
1464 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1465 *
1466 * @bp: Driver handle
1467 *
1468 * We currently support at most 16 Tx queues for each CoS, thus we will
1469 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1470 * bp->max_cos.
1471 *
1472 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1473 * index after all ETH L2 indices.
1474 *
1475 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1476 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1477 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1478 *
1479 * The proper configuration of skb->queue_mapping is handled by
1480 * bnx2x_select_queue() and __skb_tx_hash().
1481 *
1482 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1483 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1484 */
ec6ba945
VZ
1485static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1486{
6383c0b3 1487 int rc, tx, rx;
ec6ba945 1488
6383c0b3
AE
1489 tx = MAX_TXQS_PER_COS * bp->max_cos;
1490 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1491
6383c0b3
AE
1492/* account for fcoe queue */
1493#ifdef BCM_CNIC
1494 if (!NO_FCOE(bp)) {
1495 rx += FCOE_PRESENT;
1496 tx += FCOE_PRESENT;
1497 }
ec6ba945 1498#endif
6383c0b3
AE
1499
1500 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1501 if (rc) {
1502 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1503 return rc;
1504 }
1505 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1506 if (rc) {
1507 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1508 return rc;
1509 }
1510
51c1a580 1511 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
6383c0b3
AE
1512 tx, rx);
1513
ec6ba945
VZ
1514 return rc;
1515}
1516
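/* Editor's note: a sketch of the "groups of 16" Tx index layout described in
 * the comment before bnx2x_set_real_num_queues(); it assumes
 * MAX_TXQS_PER_COS is 16 (matching the wording above) and the helper name is
 * made up. CoS c, ETH queue q maps to index c * 16 + q, leaving holes at the
 * end of each group when fewer than 16 queues exist.
 */
static inline int txq_index_example(int cos, int eth_queue_idx)
{
	return cos * 16 + eth_queue_idx;
}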
a8c94b91
VZ
1517static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1518{
1519 int i;
1520
1521 for_each_queue(bp, i) {
1522 struct bnx2x_fastpath *fp = &bp->fp[i];
e52fcb24 1523 u32 mtu;
a8c94b91
VZ
1524
1525 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1526 if (IS_FCOE_IDX(i))
1527 /*
1528 * Although there are no IP frames expected to arrive to
1529 * this ring we still want to add an
1530 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1531 * overrun attack.
1532 */
e52fcb24 1533 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
a8c94b91 1534 else
e52fcb24
ED
1535 mtu = bp->dev->mtu;
1536 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
1537 IP_HEADER_ALIGNMENT_PADDING +
1538 ETH_OVREHEAD +
1539 mtu +
1540 BNX2X_FW_RX_ALIGN_END;
1541		/* Note: rx_buf_size doesn't take into account NET_SKB_PAD */
a8c94b91
VZ
1542 }
1543}
1544
619c5cb6
VZ
1545static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1546{
1547 int i;
1548 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1549 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1550
1551 /*
1552	 * Prepare the initial contents of the indirection table if RSS is
1553 * enabled
1554 */
1555 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1556 for (i = 0; i < sizeof(ind_table); i++)
1557 ind_table[i] =
278bc429
BH
1558 bp->fp->cl_id +
1559 ethtool_rxfh_indir_default(i, num_eth_queues);
619c5cb6
VZ
1560 }
1561
1562 /*
1563 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1564	 * per-port, so if explicit configuration is needed, do it only
1565 * for a PMF.
1566 *
1567 * For 57712 and newer on the other hand it's a per-function
1568 * configuration.
1569 */
1570 return bnx2x_config_rss_pf(bp, ind_table,
1571 bp->port.pmf || !CHIP_IS_E1x(bp));
1572}
1573
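/* Editor's note: a hedged sketch of the indirection-table fill in
 * bnx2x_init_rss_pf() above, assuming ethtool_rxfh_indir_default(i, n)
 * simply returns i % n (as in mainline ethtool.h); the helper name and the
 * example numbers are made up. With cl_id = 16 and 4 ETH queues the table
 * cycles 16, 17, 18, 19, 16, ...
 */
static void rss_ind_table_example(u8 *tbl, int size, u8 cl_id, int num_queues)
{
	int i;

	for (i = 0; i < size; i++)
		tbl[i] = cl_id + (i % num_queues);
}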
1574int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1575{
3b603066 1576 struct bnx2x_config_rss_params params = {NULL};
619c5cb6
VZ
1577 int i;
1578
1579 /* Although RSS is meaningless when there is a single HW queue we
1580 * still need it enabled in order to have HW Rx hash generated.
1581 *
1582 * if (!is_eth_multi(bp))
1583 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1584 */
1585
1586 params.rss_obj = &bp->rss_conf_obj;
1587
1588 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1589
1590 /* RSS mode */
1591 switch (bp->multi_mode) {
1592 case ETH_RSS_MODE_DISABLED:
1593 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1594 break;
1595 case ETH_RSS_MODE_REGULAR:
1596 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1597 break;
1598 case ETH_RSS_MODE_VLAN_PRI:
1599 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1600 break;
1601 case ETH_RSS_MODE_E1HOV_PRI:
1602 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1603 break;
1604 case ETH_RSS_MODE_IP_DSCP:
1605 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1606 break;
1607 default:
1608 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1609 return -EINVAL;
1610 }
1611
1612 /* If RSS is enabled */
1613 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1614 /* RSS configuration */
1615 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1616 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1617 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1618 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1619
1620 /* Hash bits */
1621 params.rss_result_mask = MULTI_MASK;
1622
1623 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1624
1625 if (config_hash) {
1626 /* RSS keys */
1627 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1628 params.rss_key[i] = random32();
1629
1630 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1631 }
1632 }
1633
1634 return bnx2x_config_rss(bp, &params);
1635}
1636
1637static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1638{
3b603066 1639 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6
VZ
1640
1641 /* Prepare parameters for function state transitions */
1642 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1643
1644 func_params.f_obj = &bp->func_obj;
1645 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1646
1647 func_params.params.hw_init.load_phase = load_code;
1648
1649 return bnx2x_func_state_change(bp, &func_params);
1650}
1651
1652/*
1653 * Cleans the objects that have internal lists without sending
1654 * ramrods. Should be run when interrupts are disabled.
1655 */
1656static void bnx2x_squeeze_objects(struct bnx2x *bp)
1657{
1658 int rc;
1659 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
3b603066 1660 struct bnx2x_mcast_ramrod_params rparam = {NULL};
619c5cb6
VZ
1661 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1662
1663 /***************** Cleanup MACs' object first *************************/
1664
1665	/* Wait for completion of requested commands */
1666 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1667 /* Perform a dry cleanup */
1668 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1669
1670 /* Clean ETH primary MAC */
1671 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1672 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1673 &ramrod_flags);
1674 if (rc != 0)
1675 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1676
1677 /* Cleanup UC list */
1678 vlan_mac_flags = 0;
1679 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1680 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1681 &ramrod_flags);
1682 if (rc != 0)
1683 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1684
1685 /***************** Now clean mcast object *****************************/
1686 rparam.mcast_obj = &bp->mcast_obj;
1687 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1688
1689 /* Add a DEL command... */
1690 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1691 if (rc < 0)
51c1a580
MS
1692 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
1693 rc);
619c5cb6
VZ
1694
1695 /* ...and wait until all pending commands are cleared */
1696 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1697 while (rc != 0) {
1698 if (rc < 0) {
1699 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1700 rc);
1701 return;
1702 }
1703
1704 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1705 }
1706}
1707
1708#ifndef BNX2X_STOP_ON_ERROR
1709#define LOAD_ERROR_EXIT(bp, label) \
1710 do { \
1711 (bp)->state = BNX2X_STATE_ERROR; \
1712 goto label; \
1713 } while (0)
1714#else
1715#define LOAD_ERROR_EXIT(bp, label) \
1716 do { \
1717 (bp)->state = BNX2X_STATE_ERROR; \
1718 (bp)->panic = 1; \
1719 return -EBUSY; \
1720 } while (0)
1721#endif
1722
452427b0
YM
1723bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1724{
1725 /* build FW version dword */
1726 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
1727 (BCM_5710_FW_MINOR_VERSION << 8) +
1728 (BCM_5710_FW_REVISION_VERSION << 16) +
1729 (BCM_5710_FW_ENGINEERING_VERSION << 24);
1730
1731 /* read loaded FW from chip */
1732 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
1733
1734 DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
1735
1736 if (loaded_fw != my_fw) {
1737 if (is_err)
1738 BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
1739 loaded_fw, my_fw);
1740 return false;
1741 }
1742
1743 return true;
1744}
1745
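/* Editor's note: a worked example of the firmware version packing used in
 * bnx2x_test_firmware_version() above; the helper name and the 7.2.16.0
 * version are only illustrations.
 * 7 + (2 << 8) + (16 << 16) + (0 << 24) = 0x00100207.
 */
static u32 fw_version_dword_example(u8 maj, u8 mnr, u8 rev, u8 eng)
{
	return maj + (mnr << 8) + (rev << 16) + (eng << 24);
}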
9f6c9258
DK
1746/* must be called with rtnl_lock */
1747int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1748{
619c5cb6 1749 int port = BP_PORT(bp);
9f6c9258
DK
1750 u32 load_code;
1751 int i, rc;
1752
1753#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
1754 if (unlikely(bp->panic)) {
1755 BNX2X_ERR("Can't load NIC when there is panic\n");
9f6c9258 1756 return -EPERM;
51c1a580 1757 }
9f6c9258
DK
1758#endif
1759
1760 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1761
2ae17f66
VZ
1762 /* Set the initial link reported state to link down */
1763 bnx2x_acquire_phy_lock(bp);
1764 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1765 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1766 &bp->last_reported_link.link_report_flags);
1767 bnx2x_release_phy_lock(bp);
1768
523224a3
DK
1769 /* must be called before memory allocation and HW init */
1770 bnx2x_ilt_set_info(bp);
1771
6383c0b3
AE
1772 /*
1773	 * Zero fastpath structures while preserving invariants like napi (which
1774	 * is allocated only once), the fp index, max_cos and the bp pointer.
1775 * Also set fp->disable_tpa.
b3b83c3f 1776 */
51c1a580 1777 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
b3b83c3f
DK
1778 for_each_queue(bp, i)
1779 bnx2x_bz_fp(bp, i);
1780
6383c0b3 1781
a8c94b91
VZ
1782 /* Set the receive queues buffer size */
1783 bnx2x_set_rx_buf_size(bp);
1784
d6214d7a 1785 if (bnx2x_alloc_mem(bp))
9f6c9258 1786 return -ENOMEM;
d6214d7a 1787
b3b83c3f
DK
1788 /* As long as bnx2x_alloc_mem() may possibly update
1789 * bp->num_queues, bnx2x_set_real_num_queues() should always
1790 * come after it.
1791 */
ec6ba945 1792 rc = bnx2x_set_real_num_queues(bp);
d6214d7a 1793 if (rc) {
ec6ba945 1794 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 1795 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
1796 }
1797
6383c0b3
AE
1798 /* configure multi cos mappings in kernel.
1799 * this configuration may be overridden by a multi class queue discipline
1800 * or by a dcbx negotiation result.
1801 */
1802 bnx2x_setup_tc(bp->dev, bp->max_cos);
1803
9f6c9258
DK
1804 bnx2x_napi_enable(bp);
1805
889b9af3
AE
1806 /* set pf load just before approaching the MCP */
1807 bnx2x_set_pf_load(bp);
1808
9f6c9258 1809 /* Send LOAD_REQUEST command to MCP
619c5cb6
VZ
1810 * Returns the type of LOAD command:
1811 * if it is the first port to be initialized
1812 * common blocks should be initialized, otherwise - not
1813 */
9f6c9258 1814 if (!BP_NOMCP(bp)) {
95c6c616
AE
1815 /* init fw_seq */
1816 bp->fw_seq =
1817 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
1818 DRV_MSG_SEQ_NUMBER_MASK);
1819 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
1820
1821 /* Get current FW pulse sequence */
1822 bp->fw_drv_pulse_wr_seq =
1823 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
1824 DRV_PULSE_SEQ_MASK);
1825 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
1826
a22f0788 1827 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
9f6c9258
DK
1828 if (!load_code) {
1829 BNX2X_ERR("MCP response failure, aborting\n");
1830 rc = -EBUSY;
619c5cb6 1831 LOAD_ERROR_EXIT(bp, load_error1);
9f6c9258
DK
1832 }
1833 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
51c1a580 1834 BNX2X_ERR("Driver load refused\n");
9f6c9258 1835 rc = -EBUSY; /* other port in diagnostic mode */
619c5cb6 1836 LOAD_ERROR_EXIT(bp, load_error1);
9f6c9258 1837 }
d1e2d966
AE
1838 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
1839 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
d1e2d966 1840 /* abort nic load if version mismatch */
452427b0 1841 if (!bnx2x_test_firmware_version(bp, true)) {
d1e2d966
AE
1842 rc = -EBUSY;
1843 LOAD_ERROR_EXIT(bp, load_error2);
1844 }
1845 }
9f6c9258
DK
1846
1847 } else {
f2e0899f 1848 int path = BP_PATH(bp);
9f6c9258 1849
f2e0899f
DK
1850 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1851 path, load_count[path][0], load_count[path][1],
1852 load_count[path][2]);
1853 load_count[path][0]++;
1854 load_count[path][1 + port]++;
1855 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1856 path, load_count[path][0], load_count[path][1],
1857 load_count[path][2]);
1858 if (load_count[path][0] == 1)
9f6c9258 1859 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
f2e0899f 1860 else if (load_count[path][1 + port] == 1)
9f6c9258
DK
1861 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1862 else
1863 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1864 }
1865
1866 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
f2e0899f 1867 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
3deb8167 1868 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
9f6c9258 1869 bp->port.pmf = 1;
3deb8167
YR
1870 /*
1871 * We need the barrier to ensure the ordering between the
1872 * writing to bp->port.pmf here and reading it from the
1873 * bnx2x_periodic_task().
1874 */
1875 smp_mb();
1876 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1877 } else
9f6c9258 1878 bp->port.pmf = 0;
6383c0b3 1879
51c1a580 1880 DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
9f6c9258 1881
619c5cb6
VZ
1882 /* Init Function state controlling object */
1883 bnx2x__init_func_obj(bp);
1884
9f6c9258
DK
1885 /* Initialize HW */
1886 rc = bnx2x_init_hw(bp, load_code);
1887 if (rc) {
1888 BNX2X_ERR("HW init failed, aborting\n");
a22f0788 1889 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 1890 LOAD_ERROR_EXIT(bp, load_error2);
9f6c9258
DK
1891 }
1892
d6214d7a
DK
1893 /* Connect to IRQs */
1894 rc = bnx2x_setup_irqs(bp);
523224a3 1895 if (rc) {
51c1a580 1896 BNX2X_ERR("IRQs setup failed\n");
523224a3 1897 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 1898 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
1899 }
1900
9f6c9258
DK
1901 /* Setup NIC internals and enable interrupts */
1902 bnx2x_nic_init(bp, load_code);
1903
619c5cb6
VZ
1904 /* Init per-function objects */
1905 bnx2x_init_bp_objs(bp);
1906
f2e0899f
DK
1907 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1908 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
619c5cb6
VZ
1909 (bp->common.shmem2_base)) {
1910 if (SHMEM2_HAS(bp, dcc_support))
1911 SHMEM2_WR(bp, dcc_support,
1912 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1913 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1914 }
1915
1916 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1917 rc = bnx2x_func_start(bp);
1918 if (rc) {
1919 BNX2X_ERR("Function start failed!\n");
c636322b 1920 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6
VZ
1921 LOAD_ERROR_EXIT(bp, load_error3);
1922 }
9f6c9258
DK
1923
1924 /* Send LOAD_DONE command to MCP */
1925 if (!BP_NOMCP(bp)) {
a22f0788 1926 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258
DK
1927 if (!load_code) {
1928 BNX2X_ERR("MCP response failure, aborting\n");
1929 rc = -EBUSY;
619c5cb6 1930 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258
DK
1931 }
1932 }
1933
619c5cb6 1934 rc = bnx2x_setup_leading(bp);
9f6c9258
DK
1935 if (rc) {
1936 BNX2X_ERR("Setup leading failed!\n");
619c5cb6 1937 LOAD_ERROR_EXIT(bp, load_error3);
f2e0899f 1938 }
9f6c9258 1939
9f6c9258 1940#ifdef BCM_CNIC
523224a3 1941 /* Enable Timer scan */
619c5cb6 1942 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
9f6c9258 1943#endif
f85582f8 1944
523224a3 1945 for_each_nondefault_queue(bp, i) {
619c5cb6 1946 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
51c1a580
MS
1947 if (rc) {
1948 BNX2X_ERR("Queue setup failed\n");
619c5cb6 1949 LOAD_ERROR_EXIT(bp, load_error4);
51c1a580 1950 }
523224a3
DK
1951 }
1952
619c5cb6 1953 rc = bnx2x_init_rss_pf(bp);
51c1a580
MS
1954 if (rc) {
1955 BNX2X_ERR("PF RSS init failed\n");
619c5cb6 1956 LOAD_ERROR_EXIT(bp, load_error4);
51c1a580 1957 }
619c5cb6 1958
523224a3
DK
1959 /* Now when Clients are configured we are ready to work */
1960 bp->state = BNX2X_STATE_OPEN;
1961
619c5cb6
VZ
1962 /* Configure a ucast MAC */
1963 rc = bnx2x_set_eth_mac(bp, true);
51c1a580
MS
1964 if (rc) {
1965 BNX2X_ERR("Setting Ethernet MAC failed\n");
619c5cb6 1966 LOAD_ERROR_EXIT(bp, load_error4);
51c1a580 1967 }
6e30dd4e 1968
e3835b99
DK
1969 if (bp->pending_max) {
1970 bnx2x_update_max_mf_config(bp, bp->pending_max);
1971 bp->pending_max = 0;
1972 }
1973
9f6c9258
DK
1974 if (bp->port.pmf)
1975 bnx2x_initial_phy_init(bp, load_mode);
1976
619c5cb6
VZ
1977 /* Start fast path */
1978
1979 /* Initialize Rx filter. */
1980 netif_addr_lock_bh(bp->dev);
6e30dd4e 1981 bnx2x_set_rx_mode(bp->dev);
619c5cb6 1982 netif_addr_unlock_bh(bp->dev);
6e30dd4e 1983
619c5cb6 1984 /* Start the Tx */
9f6c9258
DK
1985 switch (load_mode) {
1986 case LOAD_NORMAL:
523224a3
DK
1987 /* Tx queues should only be re-enabled */
1988 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1989 break;
1990
1991 case LOAD_OPEN:
1992 netif_tx_start_all_queues(bp->dev);
523224a3 1993 smp_mb__after_clear_bit();
9f6c9258
DK
1994 break;
1995
1996 case LOAD_DIAG:
9f6c9258
DK
1997 bp->state = BNX2X_STATE_DIAG;
1998 break;
1999
2000 default:
2001 break;
2002 }
2003
00253a8c 2004 if (bp->port.pmf)
e695a2dd 2005 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
00253a8c 2006 else
9f6c9258
DK
2007 bnx2x__link_status_update(bp);
2008
2009 /* start the timer */
2010 mod_timer(&bp->timer, jiffies + bp->current_interval);
2011
2012#ifdef BCM_CNIC
b306f5ed
DK
2013 /* re-read iscsi info */
2014 bnx2x_get_iscsi_info(bp);
9f6c9258
DK
2015 bnx2x_setup_cnic_irq_info(bp);
2016 if (bp->state == BNX2X_STATE_OPEN)
2017 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2018#endif
9f6c9258 2019
9ce392d4
YM
2020 /* mark driver is loaded in shmem2 */
2021 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2022 u32 val;
2023 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2024 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2025 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2026 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2027 }
2028
619c5cb6
VZ
2029 /* Wait for all pending SP commands to complete */
2030 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2031 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2032 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2033 return -EBUSY;
2034 }
6891dd25 2035
619c5cb6 2036 bnx2x_dcbx_init(bp);
9f6c9258
DK
2037 return 0;
2038
619c5cb6 2039#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 2040load_error4:
619c5cb6 2041#ifdef BCM_CNIC
9f6c9258 2042 /* Disable Timer scan */
619c5cb6 2043 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
9f6c9258
DK
2044#endif
2045load_error3:
2046 bnx2x_int_disable_sync(bp, 1);
d6214d7a 2047
619c5cb6
VZ
2048 /* Clean queueable objects */
2049 bnx2x_squeeze_objects(bp);
2050
9f6c9258
DK
2051 /* Free SKBs, SGEs, TPA pool and driver internals */
2052 bnx2x_free_skbs(bp);
ec6ba945 2053 for_each_rx_queue(bp, i)
9f6c9258 2054 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2055
9f6c9258 2056 /* Release IRQs */
d6214d7a
DK
2057 bnx2x_free_irq(bp);
2058load_error2:
2059 if (!BP_NOMCP(bp)) {
2060 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2061 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2062 }
2063
2064 bp->port.pmf = 0;
9f6c9258
DK
2065load_error1:
2066 bnx2x_napi_disable(bp);
889b9af3
AE
2067 /* clear pf_load status, as it was already set */
2068 bnx2x_clear_pf_load(bp);
d6214d7a 2069load_error0:
9f6c9258
DK
2070 bnx2x_free_mem(bp);
2071
2072 return rc;
619c5cb6 2073#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
2074}
2075
2076/* must be called with rtnl_lock */
2077int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
2078{
2079 int i;
c9ee9206
VZ
2080 bool global = false;
2081
9ce392d4
YM
2082 /* mark driver is unloaded in shmem2 */
2083 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2084 u32 val;
2085 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2086 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2087 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2088 }
2089
c9ee9206
VZ
2090 if ((bp->state == BNX2X_STATE_CLOSED) ||
2091 (bp->state == BNX2X_STATE_ERROR)) {
2092 /* We can get here if the driver has been unloaded
2093 * during parity error recovery and is either waiting for a
2094 * leader to complete or for other functions to unload and
2095 * then ifdown has been issued. In this case we want to
2096 * unload and let other functions complete the recovery
2097 * process.
2098 */
9f6c9258
DK
2099 bp->recovery_state = BNX2X_RECOVERY_DONE;
2100 bp->is_leader = 0;
c9ee9206
VZ
2101 bnx2x_release_leader_lock(bp);
2102 smp_mb();
2103
51c1a580
MS
2104 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2105 BNX2X_ERR("Can't unload in closed or error state\n");
9f6c9258
DK
2106 return -EINVAL;
2107 }
2108
87b7ba3d
VZ
2109 /*
2110 * It's important to set bp->state to a value different from
2111 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2112 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2113 */
2114 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2115 smp_mb();
2116
9505ee37
VZ
2117 /* Stop Tx */
2118 bnx2x_tx_disable(bp);
2119
9f6c9258
DK
2120#ifdef BCM_CNIC
2121 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2122#endif
9f6c9258 2123
9f6c9258 2124 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2125
9f6c9258 2126 del_timer_sync(&bp->timer);
f85582f8 2127
619c5cb6
VZ
2128 /* Set ALWAYS_ALIVE bit in shmem */
2129 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2130
2131 bnx2x_drv_pulse(bp);
9f6c9258 2132
f85582f8 2133 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1355b704 2134 bnx2x_save_statistics(bp);
9f6c9258
DK
2135
2136 /* Cleanup the chip if needed */
2137 if (unload_mode != UNLOAD_RECOVERY)
2138 bnx2x_chip_cleanup(bp, unload_mode);
523224a3 2139 else {
c9ee9206
VZ
2140 /* Send the UNLOAD_REQUEST to the MCP */
2141 bnx2x_send_unload_req(bp, unload_mode);
2142
2143 /*
2144 * Prevent transactions to host from the functions on the
2145 * engine that doesn't reset global blocks in case of global
2146 * attention once global blocks are reset and gates are opened
2147 * (the engine whose leader will perform the recovery
2148 * last).
2149 */
2150 if (!CHIP_IS_E1x(bp))
2151 bnx2x_pf_disable(bp);
2152
2153 /* Disable HW interrupts, NAPI */
523224a3
DK
2154 bnx2x_netif_stop(bp, 1);
2155
2156 /* Release IRQs */
d6214d7a 2157 bnx2x_free_irq(bp);
c9ee9206
VZ
2158
2159 /* Report UNLOAD_DONE to MCP */
2160 bnx2x_send_unload_done(bp);
523224a3 2161 }
9f6c9258 2162
619c5cb6
VZ
2163 /*
2164 * At this stage no more interrupts will arrive, so we may safely clean
2165 * the queueable objects here in case they failed to get cleaned so far.
2166 */
2167 bnx2x_squeeze_objects(bp);
2168
79616895
VZ
2169 /* There should be no more pending SP commands at this stage */
2170 bp->sp_state = 0;
2171
9f6c9258
DK
2172 bp->port.pmf = 0;
2173
2174 /* Free SKBs, SGEs, TPA pool and driver internals */
2175 bnx2x_free_skbs(bp);
ec6ba945 2176 for_each_rx_queue(bp, i)
9f6c9258 2177 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2178
9f6c9258
DK
2179 bnx2x_free_mem(bp);
2180
2181 bp->state = BNX2X_STATE_CLOSED;
2182
c9ee9206
VZ
2183 /* Check if there are pending parity attentions. If there are - set
2184 * RECOVERY_IN_PROGRESS.
2185 */
2186 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2187 bnx2x_set_reset_in_progress(bp);
2188
2189 /* Set RESET_IS_GLOBAL if needed */
2190 if (global)
2191 bnx2x_set_reset_global(bp);
2192 }
2193
2194
9f6c9258
DK
2195 /* The last driver must disable a "close the gate" if there is no
2196 * parity attention or "process kill" pending.
2197 */
889b9af3 2198 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2199 bnx2x_disable_close_the_gate(bp);
2200
9f6c9258
DK
2201 return 0;
2202}
f85582f8 2203
9f6c9258
DK
2204int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2205{
2206 u16 pmcsr;
2207
adf5f6a1
DK
2208 /* If there is no power capability, silently succeed */
2209 if (!bp->pm_cap) {
51c1a580 2210 BNX2X_DEV_INFO("No power capability. Breaking.\n");
adf5f6a1
DK
2211 return 0;
2212 }
2213
9f6c9258
DK
2214 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2215
2216 switch (state) {
2217 case PCI_D0:
2218 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2219 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2220 PCI_PM_CTRL_PME_STATUS));
2221
2222 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2223 /* delay required during transition out of D3hot */
2224 msleep(20);
2225 break;
2226
2227 case PCI_D3hot:
2228 /* If there are other clients above, don't
2229 shut down the power */
2230 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2231 return 0;
2232 /* Don't shut down the power for emulation and FPGA */
2233 if (CHIP_REV_IS_SLOW(bp))
2234 return 0;
2235
2236 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2237 pmcsr |= 3;
2238
2239 if (bp->wol)
2240 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2241
2242 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2243 pmcsr);
2244
2245 /* No more memory access after this point until
2246 * device is brought back to D0.
2247 */
2248 break;
2249
2250 default:
51c1a580 2251 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
9f6c9258
DK
2252 return -EINVAL;
2253 }
2254 return 0;
2255}
2256
9f6c9258
DK
2257/*
2258 * net_device service functions
2259 */
d6214d7a 2260int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
2261{
2262 int work_done = 0;
6383c0b3 2263 u8 cos;
9f6c9258
DK
2264 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2265 napi);
2266 struct bnx2x *bp = fp->bp;
2267
2268 while (1) {
2269#ifdef BNX2X_STOP_ON_ERROR
2270 if (unlikely(bp->panic)) {
2271 napi_complete(napi);
2272 return 0;
2273 }
2274#endif
2275
6383c0b3
AE
2276 for_each_cos_in_tx_queue(fp, cos)
2277 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2278 bnx2x_tx_int(bp, &fp->txdata[cos]);
2279
9f6c9258
DK
2280
2281 if (bnx2x_has_rx_work(fp)) {
2282 work_done += bnx2x_rx_int(fp, budget - work_done);
2283
2284 /* must not complete if we consumed full budget */
2285 if (work_done >= budget)
2286 break;
2287 }
2288
2289 /* Fall out from the NAPI loop if needed */
2290 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
ec6ba945
VZ
2291#ifdef BCM_CNIC
2292 /* No need to update SB for FCoE L2 ring as long as
2293 * it's connected to the default SB and the SB
2294 * has been updated when NAPI was scheduled.
2295 */
2296 if (IS_FCOE_FP(fp)) {
2297 napi_complete(napi);
2298 break;
2299 }
2300#endif
2301
9f6c9258 2302 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
2303 /* bnx2x_has_rx_work() reads the status block,
2304 * thus we need to ensure that status block indices
2305 * have been actually read (bnx2x_update_fpsb_idx)
2306 * prior to this check (bnx2x_has_rx_work) so that
2307 * we won't write the "newer" value of the status block
2308 * to IGU (if there was a DMA right after
2309 * bnx2x_has_rx_work and if there is no rmb, the memory
2310 * reading (bnx2x_update_fpsb_idx) may be postponed
2311 * to right before bnx2x_ack_sb). In this case there
2312 * will never be another interrupt until there is
2313 * another update of the status block, while there
2314 * is still unhandled work.
2315 */
9f6c9258
DK
2316 rmb();
2317
2318 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2319 napi_complete(napi);
2320 /* Re-enable interrupts */
51c1a580 2321 DP(NETIF_MSG_RX_STATUS,
523224a3
DK
2322 "Update index to %d\n", fp->fp_hc_idx);
2323 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2324 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
2325 IGU_INT_ENABLE, 1);
2326 break;
2327 }
2328 }
2329 }
2330
2331 return work_done;
2332}
2333
9f6c9258
DK
2334/* we split the first BD into headers and data BDs
2335 * to ease the pain of our fellow microcode engineers
2336 * we use one mapping for both BDs
2337 * So far this has only been observed to happen
2338 * in Other Operating Systems(TM)
2339 */
2340static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
6383c0b3 2341 struct bnx2x_fp_txdata *txdata,
9f6c9258
DK
2342 struct sw_tx_bd *tx_buf,
2343 struct eth_tx_start_bd **tx_bd, u16 hlen,
2344 u16 bd_prod, int nbd)
2345{
2346 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2347 struct eth_tx_bd *d_tx_bd;
2348 dma_addr_t mapping;
2349 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2350
2351 /* first fix first BD */
2352 h_tx_bd->nbd = cpu_to_le16(nbd);
2353 h_tx_bd->nbytes = cpu_to_le16(hlen);
2354
51c1a580
MS
2355 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
2356 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
9f6c9258
DK
2357
2358 /* now get a new data BD
2359 * (after the pbd) and fill it */
2360 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 2361 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
2362
2363 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2364 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2365
2366 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2367 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2368 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2369
2370 /* this marks the BD as one that has no individual mapping */
2371 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2372
2373 DP(NETIF_MSG_TX_QUEUED,
2374 "TSO split data size is %d (%x:%x)\n",
2375 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2376
2377 /* update tx_bd */
2378 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2379
2380 return bd_prod;
2381}
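/* Illustrative example (assumed sizes, not driver code): for a TSO skb whose
 * linear part is 200 bytes with hlen = 54 (ETH + IP + TCP), bnx2x_tx_split()
 * shrinks the start BD to describe bytes [0, 54) and adds a data BD for
 * bytes [54, 200) at mapping + hlen; the caller bumps nbd by one and both
 * BDs share the single DMA mapping of the linear data.
 */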
2382
2383static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2384{
2385 if (fix > 0)
2386 csum = (u16) ~csum_fold(csum_sub(csum,
2387 csum_partial(t_header - fix, fix, 0)));
2388
2389 else if (fix < 0)
2390 csum = (u16) ~csum_fold(csum_add(csum,
2391 csum_partial(t_header, -fix, 0)));
2392
2393 return swab16(csum);
2394}
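/* Illustrative note (added for clarity): when the stack's partial checksum
 * starts 'fix' bytes before the transport header (fix > 0), the partial sum
 * of those extra bytes is subtracted before folding; with fix < 0 the missing
 * bytes are added instead.  The folded result is then byte-swapped with
 * swab16() before it is written into the parse BD.
 */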
2395
2396static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2397{
2398 u32 rc;
2399
2400 if (skb->ip_summed != CHECKSUM_PARTIAL)
2401 rc = XMIT_PLAIN;
2402
2403 else {
d0d9d8ef 2404 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
9f6c9258
DK
2405 rc = XMIT_CSUM_V6;
2406 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2407 rc |= XMIT_CSUM_TCP;
2408
2409 } else {
2410 rc = XMIT_CSUM_V4;
2411 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2412 rc |= XMIT_CSUM_TCP;
2413 }
2414 }
2415
5892b9e9
VZ
2416 if (skb_is_gso_v6(skb))
2417 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2418 else if (skb_is_gso(skb))
2419 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
9f6c9258
DK
2420
2421 return rc;
2422}
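/* Illustrative example (not in the original source): a CHECKSUM_PARTIAL
 * TCP/IPv4 skb with GSO enabled resolves to
 *	xmit_type = XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4
 * while an skb with no checksum offload requested maps to XMIT_PLAIN.
 */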
2423
2424#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2425/* check if packet requires linearization (packet is too fragmented)
2426 no need to check fragmentation if page size > 8K (there will be no
2427 violation of FW restrictions) */
2428static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2429 u32 xmit_type)
2430{
2431 int to_copy = 0;
2432 int hlen = 0;
2433 int first_bd_sz = 0;
2434
2435 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2436 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2437
2438 if (xmit_type & XMIT_GSO) {
2439 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2440 /* Check if LSO packet needs to be copied:
2441 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2442 int wnd_size = MAX_FETCH_BD - 3;
2443 /* Number of windows to check */
2444 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2445 int wnd_idx = 0;
2446 int frag_idx = 0;
2447 u32 wnd_sum = 0;
2448
2449 /* Headers length */
2450 hlen = (int)(skb_transport_header(skb) - skb->data) +
2451 tcp_hdrlen(skb);
2452
2453 /* Amount of data (w/o headers) on linear part of SKB*/
2454 first_bd_sz = skb_headlen(skb) - hlen;
2455
2456 wnd_sum = first_bd_sz;
2457
2458 /* Calculate the first sum - it's special */
2459 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2460 wnd_sum +=
9e903e08 2461 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
2462
2463 /* If there was data on linear skb data - check it */
2464 if (first_bd_sz > 0) {
2465 if (unlikely(wnd_sum < lso_mss)) {
2466 to_copy = 1;
2467 goto exit_lbl;
2468 }
2469
2470 wnd_sum -= first_bd_sz;
2471 }
2472
2473 /* Others are easier: run through the frag list and
2474 check all windows */
2475 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2476 wnd_sum +=
9e903e08 2477 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
2478
2479 if (unlikely(wnd_sum < lso_mss)) {
2480 to_copy = 1;
2481 break;
2482 }
2483 wnd_sum -=
9e903e08 2484 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
2485 }
2486 } else {
2487 /* a non-LSO packet that is too fragmented should always
2488 be linearized */
2489 to_copy = 1;
2490 }
2491 }
2492
2493exit_lbl:
2494 if (unlikely(to_copy))
2495 DP(NETIF_MSG_TX_QUEUED,
51c1a580 2496 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
9f6c9258
DK
2497 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2498 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2499
2500 return to_copy;
2501}
2502#endif
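/* Illustrative example (hypothetical numbers): the check above slides a
 * window of wnd_size = MAX_FETCH_BD - 3 consecutive buffers (linear part plus
 * frags) over the packet and requires every window to hold at least gso_size
 * bytes.  If wnd_size were 10 and gso_size 1400, ten consecutive 100-byte
 * frags (only 1000 bytes) would fail the check and the skb would be
 * linearized.
 */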
2503
2297a2da
VZ
2504static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2505 u32 xmit_type)
f2e0899f 2506{
2297a2da
VZ
2507 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2508 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2509 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f
DK
2510 if ((xmit_type & XMIT_GSO_V6) &&
2511 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2297a2da 2512 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
2513}
2514
2515/**
e8920674 2516 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 2517 *
e8920674
DK
2518 * @skb: packet skb
2519 * @pbd: parse BD
2520 * @xmit_type: xmit flags
f2e0899f
DK
2521 */
2522static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2523 struct eth_tx_parse_bd_e1x *pbd,
2524 u32 xmit_type)
2525{
2526 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2527 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2528 pbd->tcp_flags = pbd_tcp_flags(skb);
2529
2530 if (xmit_type & XMIT_GSO_V4) {
2531 pbd->ip_id = swab16(ip_hdr(skb)->id);
2532 pbd->tcp_pseudo_csum =
2533 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2534 ip_hdr(skb)->daddr,
2535 0, IPPROTO_TCP, 0));
2536
2537 } else
2538 pbd->tcp_pseudo_csum =
2539 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2540 &ipv6_hdr(skb)->daddr,
2541 0, IPPROTO_TCP, 0));
2542
2543 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2544}
f85582f8 2545
f2e0899f 2546/**
e8920674 2547 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 2548 *
e8920674
DK
2549 * @bp: driver handle
2550 * @skb: packet skb
2551 * @parsing_data: data to be updated
2552 * @xmit_type: xmit flags
f2e0899f 2553 *
e8920674 2554 * 57712 related
f2e0899f
DK
2555 */
2556static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2297a2da 2557 u32 *parsing_data, u32 xmit_type)
f2e0899f 2558{
e39aece7
VZ
2559 *parsing_data |=
2560 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2561 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2562 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
f2e0899f 2563
e39aece7
VZ
2564 if (xmit_type & XMIT_CSUM_TCP) {
2565 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2566 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2567 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 2568
e39aece7
VZ
2569 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2570 } else
2571 /* We support checksum offload for TCP and UDP only.
2572 * No need to pass the UDP header length - it's a constant.
2573 */
2574 return skb_transport_header(skb) +
2575 sizeof(struct udphdr) - skb->data;
f2e0899f
DK
2576}
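/* Illustrative example (assumed frame layout): for a TCP/IPv4 packet with a
 * 14-byte Ethernet header, a 20-byte IP header and a 20-byte TCP header the
 * function above returns 14 + 20 + 20 = 54 (the full header length in bytes),
 * while parsing_data carries the TCP header start offset in 16-bit words and
 * the TCP header length in dwords.
 */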
2577
93ef5c02
DK
2578static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2579 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2580{
93ef5c02
DK
2581 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2582
2583 if (xmit_type & XMIT_CSUM_V4)
2584 tx_start_bd->bd_flags.as_bitfield |=
2585 ETH_TX_BD_FLAGS_IP_CSUM;
2586 else
2587 tx_start_bd->bd_flags.as_bitfield |=
2588 ETH_TX_BD_FLAGS_IPV6;
2589
2590 if (!(xmit_type & XMIT_CSUM_TCP))
2591 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
2592}
2593
f2e0899f 2594/**
e8920674 2595 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 2596 *
e8920674
DK
2597 * @bp: driver handle
2598 * @skb: packet skb
2599 * @pbd: parse BD to be updated
2600 * @xmit_type: xmit flags
f2e0899f
DK
2601 */
2602static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2603 struct eth_tx_parse_bd_e1x *pbd,
2604 u32 xmit_type)
2605{
e39aece7 2606 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
2607
2608 /* for now NS flag is not used in Linux */
2609 pbd->global_data =
2610 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2611 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2612
2613 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 2614 skb_network_header(skb)) >> 1;
f2e0899f 2615
e39aece7
VZ
2616 hlen += pbd->ip_hlen_w;
2617
2618 /* We support checksum offload for TCP and UDP only */
2619 if (xmit_type & XMIT_CSUM_TCP)
2620 hlen += tcp_hdrlen(skb) / 2;
2621 else
2622 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
2623
2624 pbd->total_hlen_w = cpu_to_le16(hlen);
2625 hlen = hlen*2;
2626
2627 if (xmit_type & XMIT_CSUM_TCP) {
2628 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2629
2630 } else {
2631 s8 fix = SKB_CS_OFF(skb); /* signed! */
2632
2633 DP(NETIF_MSG_TX_QUEUED,
2634 "hlen %d fix %d csum before fix %x\n",
2635 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2636
2637 /* HW bug: fixup the CSUM */
2638 pbd->tcp_pseudo_csum =
2639 bnx2x_csum_fix(skb_transport_header(skb),
2640 SKB_CS(skb), fix);
2641
2642 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2643 pbd->tcp_pseudo_csum);
2644 }
2645
2646 return hlen;
2647}
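/* Illustrative example (same assumed frame layout as above): with a 14-byte
 * Ethernet, a 20-byte IP and a 20-byte TCP header the E1x parse BD ends up
 * with an Ethernet header length of 7 words in global_data, ip_hlen_w = 10
 * and total_hlen_w = 27 words (54 bytes), and the function returns 54.
 */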
f85582f8 2648
9f6c9258
DK
2649/* called with netif_tx_lock
2650 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2651 * netif_wake_queue()
2652 */
2653netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2654{
2655 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 2656
9f6c9258
DK
2657 struct bnx2x_fastpath *fp;
2658 struct netdev_queue *txq;
6383c0b3 2659 struct bnx2x_fp_txdata *txdata;
9f6c9258 2660 struct sw_tx_bd *tx_buf;
619c5cb6 2661 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 2662 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 2663 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 2664 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2297a2da 2665 u32 pbd_e2_parsing_data = 0;
9f6c9258 2666 u16 pkt_prod, bd_prod;
6383c0b3 2667 int nbd, txq_index, fp_index, txdata_index;
9f6c9258
DK
2668 dma_addr_t mapping;
2669 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2670 int i;
2671 u8 hlen = 0;
2672 __le16 pkt_size = 0;
2673 struct ethhdr *eth;
2674 u8 mac_type = UNICAST_ADDRESS;
2675
2676#ifdef BNX2X_STOP_ON_ERROR
2677 if (unlikely(bp->panic))
2678 return NETDEV_TX_BUSY;
2679#endif
2680
6383c0b3
AE
2681 txq_index = skb_get_queue_mapping(skb);
2682 txq = netdev_get_tx_queue(dev, txq_index);
2683
2684 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2685
2686 /* decode the fastpath index and the cos index from the txq */
2687 fp_index = TXQ_TO_FP(txq_index);
2688 txdata_index = TXQ_TO_COS(txq_index);
2689
2690#ifdef BCM_CNIC
2691 /*
2692 * Override the above for the FCoE queue:
2693 * - FCoE fp entry is right after the ETH entries.
2694 * - FCoE L2 queue uses bp->txdata[0] only.
2695 */
2696 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2697 bnx2x_fcoe_tx(bp, txq_index)))) {
2698 fp_index = FCOE_IDX;
2699 txdata_index = 0;
2700 }
2701#endif
2702
2703 /* enable this debug print to view the transmission queue being used
51c1a580 2704 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 2705 txq_index, fp_index, txdata_index); */
9f6c9258 2706
6383c0b3 2707 /* locate the fastpath and the txdata */
9f6c9258 2708 fp = &bp->fp[fp_index];
6383c0b3
AE
2709 txdata = &fp->txdata[txdata_index];
2710
2711 /* enable this debug print to view the transmission details
51c1a580
MS
2712 DP(NETIF_MSG_TX_QUEUED,
2713 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
6383c0b3 2714 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 2715
6383c0b3
AE
2716 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2717 (skb_shinfo(skb)->nr_frags + 3))) {
9f6c9258
DK
2718 fp->eth_q_stats.driver_xoff++;
2719 netif_tx_stop_queue(txq);
2720 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2721 return NETDEV_TX_BUSY;
2722 }
2723
51c1a580
MS
2724 DP(NETIF_MSG_TX_QUEUED,
2725 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
6383c0b3 2726 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9f6c9258
DK
2727 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2728
2729 eth = (struct ethhdr *)skb->data;
2730
2731 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2732 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2733 if (is_broadcast_ether_addr(eth->h_dest))
2734 mac_type = BROADCAST_ADDRESS;
2735 else
2736 mac_type = MULTICAST_ADDRESS;
2737 }
2738
2739#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2740 /* First, check if we need to linearize the skb (due to FW
2741 restrictions). No need to check fragmentation if page size > 8K
2742 (there will be no violation to FW restrictions) */
2743 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2744 /* Statistics of linearization */
2745 bp->lin_cnt++;
2746 if (skb_linearize(skb) != 0) {
51c1a580
MS
2747 DP(NETIF_MSG_TX_QUEUED,
2748 "SKB linearization failed - silently dropping this SKB\n");
9f6c9258
DK
2749 dev_kfree_skb_any(skb);
2750 return NETDEV_TX_OK;
2751 }
2752 }
2753#endif
619c5cb6
VZ
2754 /* Map skb linear data for DMA */
2755 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2756 skb_headlen(skb), DMA_TO_DEVICE);
2757 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
51c1a580
MS
2758 DP(NETIF_MSG_TX_QUEUED,
2759 "SKB mapping failed - silently dropping this SKB\n");
619c5cb6
VZ
2760 dev_kfree_skb_any(skb);
2761 return NETDEV_TX_OK;
2762 }
9f6c9258
DK
2763 /*
2764 Please read carefully. First we use one BD which we mark as start,
2765 then we have a parsing info BD (used for TSO or xsum),
2766 and only then we have the rest of the TSO BDs.
2767 (don't forget to mark the last one as last,
2768 and to unmap only AFTER you write to the BD ...)
2769 And above all, all pbd sizes are in words - NOT DWORDS!
2770 */
2771
619c5cb6
VZ
2772 /* get current pkt produced now - advance it just before sending packet
2773 * since mapping of pages may fail and cause packet to be dropped
2774 */
6383c0b3
AE
2775 pkt_prod = txdata->tx_pkt_prod;
2776 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 2777
619c5cb6
VZ
2778 /* get a tx_buf and first BD
2779 * tx_start_bd may be changed during SPLIT,
2780 * but first_bd will always stay first
2781 */
6383c0b3
AE
2782 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2783 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 2784 first_bd = tx_start_bd;
9f6c9258
DK
2785
2786 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8
DK
2787 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2788 mac_type);
2789
9f6c9258 2790 /* header nbd */
f85582f8 2791 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
9f6c9258
DK
2792
2793 /* remember the first BD of the packet */
6383c0b3 2794 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
2795 tx_buf->skb = skb;
2796 tx_buf->flags = 0;
2797
2798 DP(NETIF_MSG_TX_QUEUED,
2799 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 2800 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 2801
eab6d18d 2802 if (vlan_tx_tag_present(skb)) {
523224a3
DK
2803 tx_start_bd->vlan_or_ethertype =
2804 cpu_to_le16(vlan_tx_tag_get(skb));
2805 tx_start_bd->bd_flags.as_bitfield |=
2806 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
9f6c9258 2807 } else
523224a3 2808 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
9f6c9258
DK
2809
2810 /* turn on parsing and get a BD */
2811 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 2812
93ef5c02
DK
2813 if (xmit_type & XMIT_CSUM)
2814 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 2815
619c5cb6 2816 if (!CHIP_IS_E1x(bp)) {
6383c0b3 2817 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f
DK
2818 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2819 /* Set PBD in checksum offload case */
2820 if (xmit_type & XMIT_CSUM)
2297a2da
VZ
2821 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2822 &pbd_e2_parsing_data,
2823 xmit_type);
619c5cb6
VZ
2824 if (IS_MF_SI(bp)) {
2825 /*
2826 * fill in the MAC addresses in the PBD - for local
2827 * switching
2828 */
2829 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2830 &pbd_e2->src_mac_addr_mid,
2831 &pbd_e2->src_mac_addr_lo,
2832 eth->h_source);
2833 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2834 &pbd_e2->dst_mac_addr_mid,
2835 &pbd_e2->dst_mac_addr_lo,
2836 eth->h_dest);
2837 }
f2e0899f 2838 } else {
6383c0b3 2839 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
2840 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2841 /* Set PBD in checksum offload case */
2842 if (xmit_type & XMIT_CSUM)
2843 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 2844
9f6c9258
DK
2845 }
2846
f85582f8 2847 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
2848 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2849 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
619c5cb6 2850 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
9f6c9258
DK
2851 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2852 pkt_size = tx_start_bd->nbytes;
2853
51c1a580
MS
2854 DP(NETIF_MSG_TX_QUEUED,
2855 "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
9f6c9258
DK
2856 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2857 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
2858 tx_start_bd->bd_flags.as_bitfield,
2859 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
2860
2861 if (xmit_type & XMIT_GSO) {
2862
2863 DP(NETIF_MSG_TX_QUEUED,
2864 "TSO packet len %d hlen %d total len %d tso size %d\n",
2865 skb->len, hlen, skb_headlen(skb),
2866 skb_shinfo(skb)->gso_size);
2867
2868 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2869
2870 if (unlikely(skb_headlen(skb) > hlen))
6383c0b3
AE
2871 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2872 &tx_start_bd, hlen,
2873 bd_prod, ++nbd);
619c5cb6 2874 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
2875 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2876 xmit_type);
f2e0899f
DK
2877 else
2878 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 2879 }
2297a2da
VZ
2880
2881 /* Set the PBD's parsing_data field if not zero
2882 * (for the chips newer than 57711).
2883 */
2884 if (pbd_e2_parsing_data)
2885 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2886
9f6c9258
DK
2887 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2888
f85582f8 2889 /* Handle fragmented skb */
9f6c9258
DK
2890 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2891 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2892
9e903e08
ED
2893 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
2894 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6 2895 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2df1a70a 2896 unsigned int pkts_compl = 0, bytes_compl = 0;
619c5cb6 2897
51c1a580
MS
2898 DP(NETIF_MSG_TX_QUEUED,
2899 "Unable to map page - dropping packet...\n");
619c5cb6
VZ
2900
2901 /* we need unmap all buffers already mapped
2902 * for this SKB;
2903 * first_bd->nbd need to be properly updated
2904 * before call to bnx2x_free_tx_pkt
2905 */
2906 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3 2907 bnx2x_free_tx_pkt(bp, txdata,
2df1a70a
TH
2908 TX_BD(txdata->tx_pkt_prod),
2909 &pkts_compl, &bytes_compl);
619c5cb6
VZ
2910 return NETDEV_TX_OK;
2911 }
2912
9f6c9258 2913 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 2914 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 2915 if (total_pkt_bd == NULL)
6383c0b3 2916 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 2917
9f6c9258
DK
2918 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2919 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
2920 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
2921 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 2922 nbd++;
9f6c9258
DK
2923
2924 DP(NETIF_MSG_TX_QUEUED,
2925 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2926 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2927 le16_to_cpu(tx_data_bd->nbytes));
2928 }
2929
2930 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2931
619c5cb6
VZ
2932 /* update with actual num BDs */
2933 first_bd->nbd = cpu_to_le16(nbd);
2934
9f6c9258
DK
2935 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2936
2937 /* now send a tx doorbell, counting the next BD
2938 * if the packet contains or ends with it
2939 */
2940 if (TX_BD_POFF(bd_prod) < nbd)
2941 nbd++;
2942
619c5cb6
VZ
2943 /* total_pkt_bytes should be set on the first data BD if
2944 * it's not an LSO packet and there is more than one
2945 * data BD. In this case pkt_size is limited by an MTU value.
2946 * However we prefer to set it for an LSO packet (while we don't
2947 * have to) in order to save some CPU cycles in the non-LSO
2948 * case, which is when we care about them much more.
2949 */
9f6c9258
DK
2950 if (total_pkt_bd != NULL)
2951 total_pkt_bd->total_pkt_bytes = pkt_size;
2952
523224a3 2953 if (pbd_e1x)
9f6c9258 2954 DP(NETIF_MSG_TX_QUEUED,
51c1a580 2955 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
2956 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2957 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2958 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2959 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
2960 if (pbd_e2)
2961 DP(NETIF_MSG_TX_QUEUED,
2962 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2963 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2964 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2965 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2966 pbd_e2->parsing_data);
9f6c9258
DK
2967 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2968
2df1a70a
TH
2969 netdev_tx_sent_queue(txq, skb->len);
2970
6383c0b3 2971 txdata->tx_pkt_prod++;
9f6c9258
DK
2972 /*
2973 * Make sure that the BD data is updated before updating the producer
2974 * since FW might read the BD right after the producer is updated.
2975 * This is only applicable for weak-ordered memory model archs such
2977 * as IA-64. The following barrier is also mandatory since the FW
2978 * assumes packets must have BDs.
2978 */
2979 wmb();
2980
6383c0b3 2981 txdata->tx_db.data.prod += nbd;
9f6c9258 2982 barrier();
f85582f8 2983
6383c0b3 2984 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
2985
2986 mmiowb();
2987
6383c0b3 2988 txdata->tx_bd_prod += nbd;
9f6c9258 2989
6383c0b3 2990 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
9f6c9258
DK
2991 netif_tx_stop_queue(txq);
2992
2993 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2994 * ordering of set_bit() in netif_tx_stop_queue() and read of
2995 * fp->bd_tx_cons */
2996 smp_mb();
2997
2998 fp->eth_q_stats.driver_xoff++;
6383c0b3 2999 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
9f6c9258
DK
3000 netif_tx_wake_queue(txq);
3001 }
6383c0b3 3002 txdata->tx_pkt++;
9f6c9258
DK
3003
3004 return NETDEV_TX_OK;
3005}
f85582f8 3006
6383c0b3
AE
3007/**
3008 * bnx2x_setup_tc - routine to configure net_device for multi tc
3009 *
3010 * @netdev: net device to configure
3011 * @num_tc: number of traffic classes to enable
3012 *
3013 * callback connected to the ndo_setup_tc function pointer
3014 */
3015int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
3016{
3017 int cos, prio, count, offset;
3018 struct bnx2x *bp = netdev_priv(dev);
3019
3020 /* setup tc must be called under rtnl lock */
3021 ASSERT_RTNL();
3022
3023 /* no traffic classes requested. aborting */
3024 if (!num_tc) {
3025 netdev_reset_tc(dev);
3026 return 0;
3027 }
3028
3029 /* requested to support too many traffic classes */
3030 if (num_tc > bp->max_cos) {
51c1a580
MS
3031 BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
3032 num_tc, bp->max_cos);
6383c0b3
AE
3033 return -EINVAL;
3034 }
3035
3036 /* declare amount of supported traffic classes */
3037 if (netdev_set_num_tc(dev, num_tc)) {
51c1a580 3038 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
6383c0b3
AE
3039 return -EINVAL;
3040 }
3041
3042 /* configure priority to traffic class mapping */
3043 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
3044 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
51c1a580
MS
3045 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3046 "mapping priority %d to tc %d\n",
6383c0b3
AE
3047 prio, bp->prio_to_cos[prio]);
3048 }
3049
3050
3051 /* Use this configuration to differentiate tc0 from other COSes
3052 This can be used for ets or pfc, and save the effort of setting
3053 up a multi class queue disc or negotiating DCBX with a switch
3054 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 3055 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
3056 for (prio = 1; prio < 16; prio++) {
3057 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 3058 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
3059 } */
3060
3061 /* configure traffic class to transmission queue mapping */
3062 for (cos = 0; cos < bp->max_cos; cos++) {
3063 count = BNX2X_NUM_ETH_QUEUES(bp);
3064 offset = cos * MAX_TXQS_PER_COS;
3065 netdev_set_tc_queue(dev, cos, count, offset);
51c1a580
MS
3066 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
3067 "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
3068 cos, offset, count);
3069 }
3070
3071 return 0;
3072}
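/* Illustrative example (hypothetical sizes, not driver code): with max_cos = 3
 * and 4 ETH queues the loops above declare three traffic classes, map each
 * priority through bp->prio_to_cos[], and anchor tc N at queue offset
 * N * MAX_TXQS_PER_COS with a count of 4, so the stack selects a Tx queue of
 * the form offset + (hash % count) for each class.
 */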
3073
9f6c9258
DK
3074/* called with rtnl_lock */
3075int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3076{
3077 struct sockaddr *addr = p;
3078 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 3079 int rc = 0;
9f6c9258 3080
51c1a580
MS
3081 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
3082 BNX2X_ERR("Requested MAC address is not valid\n");
614c76df 3083 return -EINVAL;
51c1a580 3084 }
614c76df
DK
3085
3086#ifdef BCM_CNIC
9e62e912 3087 if (IS_MF_STORAGE_SD(bp) && !is_zero_ether_addr(addr->sa_data)) {
51c1a580 3088 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
9f6c9258 3089 return -EINVAL;
51c1a580 3090 }
614c76df 3091#endif
9f6c9258 3092
619c5cb6
VZ
3093 if (netif_running(dev)) {
3094 rc = bnx2x_set_eth_mac(bp, false);
3095 if (rc)
3096 return rc;
3097 }
3098
7ce5d222 3099 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
9f6c9258 3100 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 3101
523224a3 3102 if (netif_running(dev))
619c5cb6 3103 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 3104
619c5cb6 3105 return rc;
9f6c9258
DK
3106}
3107
b3b83c3f
DK
3108static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3109{
3110 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3111 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 3112 u8 cos;
b3b83c3f
DK
3113
3114 /* Common */
3115#ifdef BCM_CNIC
3116 if (IS_FCOE_IDX(fp_index)) {
3117 memset(sb, 0, sizeof(union host_hc_status_block));
3118 fp->status_blk_mapping = 0;
3119
3120 } else {
3121#endif
3122 /* status blocks */
619c5cb6 3123 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3124 BNX2X_PCI_FREE(sb->e2_sb,
3125 bnx2x_fp(bp, fp_index,
3126 status_blk_mapping),
3127 sizeof(struct host_hc_status_block_e2));
3128 else
3129 BNX2X_PCI_FREE(sb->e1x_sb,
3130 bnx2x_fp(bp, fp_index,
3131 status_blk_mapping),
3132 sizeof(struct host_hc_status_block_e1x));
3133#ifdef BCM_CNIC
3134 }
3135#endif
3136 /* Rx */
3137 if (!skip_rx_queue(bp, fp_index)) {
3138 bnx2x_free_rx_bds(fp);
3139
3140 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3141 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3142 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3143 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3144 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3145
3146 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3147 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3148 sizeof(struct eth_fast_path_rx_cqe) *
3149 NUM_RCQ_BD);
3150
3151 /* SGE ring */
3152 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3153 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3154 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3155 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3156 }
3157
3158 /* Tx */
3159 if (!skip_tx_queue(bp, fp_index)) {
3160 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3
AE
3161 for_each_cos_in_tx_queue(fp, cos) {
3162 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3163
51c1a580 3164 DP(NETIF_MSG_IFDOWN,
94f05b0f 3165 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
3166 fp_index, cos, txdata->cid);
3167
3168 BNX2X_FREE(txdata->tx_buf_ring);
3169 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3170 txdata->tx_desc_mapping,
3171 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3172 }
b3b83c3f
DK
3173 }
3174 /* end of fastpath */
3175}
3176
3177void bnx2x_free_fp_mem(struct bnx2x *bp)
3178{
3179 int i;
3180 for_each_queue(bp, i)
3181 bnx2x_free_fp_mem_at(bp, i);
3182}
3183
3184static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3185{
3186 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 3187 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
3188 bnx2x_fp(bp, index, sb_index_values) =
3189 (__le16 *)status_blk.e2_sb->sb.index_values;
3190 bnx2x_fp(bp, index, sb_running_index) =
3191 (__le16 *)status_blk.e2_sb->sb.running_index;
3192 } else {
3193 bnx2x_fp(bp, index, sb_index_values) =
3194 (__le16 *)status_blk.e1x_sb->sb.index_values;
3195 bnx2x_fp(bp, index, sb_running_index) =
3196 (__le16 *)status_blk.e1x_sb->sb.running_index;
3197 }
3198}
3199
3200static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3201{
3202 union host_hc_status_block *sb;
3203 struct bnx2x_fastpath *fp = &bp->fp[index];
3204 int ring_size = 0;
6383c0b3 3205 u8 cos;
c2188952 3206 int rx_ring_size = 0;
b3b83c3f 3207
614c76df 3208#ifdef BCM_CNIC
9e62e912 3209 if (!bp->rx_ring_size && IS_MF_STORAGE_SD(bp)) {
614c76df
DK
3210 rx_ring_size = MIN_RX_SIZE_NONTPA;
3211 bp->rx_ring_size = rx_ring_size;
3212 } else
3213#endif
c2188952 3214 if (!bp->rx_ring_size) {
d760fc37
MY
3215 u32 cfg = SHMEM_RD(bp,
3216 dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
b3b83c3f 3217
c2188952
VZ
3218 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3219
d760fc37
MY
3220 /* Decrease ring size for 1G functions */
3221 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
3222 PORT_HW_CFG_NET_SERDES_IF_SGMII)
3223 rx_ring_size /= 10;
3224
c2188952
VZ
3225 /* allocate at least number of buffers required by FW */
3226 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3227 MIN_RX_SIZE_TPA, rx_ring_size);
3228
3229 bp->rx_ring_size = rx_ring_size;
614c76df 3230 } else /* if rx_ring_size specified - use it */
c2188952 3231 rx_ring_size = bp->rx_ring_size;
b3b83c3f 3232
b3b83c3f
DK
3233 /* Common */
3234 sb = &bnx2x_fp(bp, index, status_blk);
3235#ifdef BCM_CNIC
3236 if (!IS_FCOE_IDX(index)) {
3237#endif
3238 /* status blocks */
619c5cb6 3239 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3240 BNX2X_PCI_ALLOC(sb->e2_sb,
3241 &bnx2x_fp(bp, index, status_blk_mapping),
3242 sizeof(struct host_hc_status_block_e2));
3243 else
3244 BNX2X_PCI_ALLOC(sb->e1x_sb,
3245 &bnx2x_fp(bp, index, status_blk_mapping),
3246 sizeof(struct host_hc_status_block_e1x));
3247#ifdef BCM_CNIC
3248 }
3249#endif
8eef2af1
DK
3250
3251 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3252 * set shortcuts for it.
3253 */
3254 if (!IS_FCOE_IDX(index))
3255 set_sb_shortcuts(bp, index);
b3b83c3f
DK
3256
3257 /* Tx */
3258 if (!skip_tx_queue(bp, index)) {
3259 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3
AE
3260 for_each_cos_in_tx_queue(fp, cos) {
3261 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3262
51c1a580
MS
3263 DP(NETIF_MSG_IFUP,
3264 "allocating tx memory of fp %d cos %d\n",
6383c0b3
AE
3265 index, cos);
3266
3267 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 3268 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
3269 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3270 &txdata->tx_desc_mapping,
b3b83c3f 3271 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 3272 }
b3b83c3f
DK
3273 }
3274
3275 /* Rx */
3276 if (!skip_rx_queue(bp, index)) {
3277 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3278 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3279 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3280 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3281 &bnx2x_fp(bp, index, rx_desc_mapping),
3282 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3283
3284 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3285 &bnx2x_fp(bp, index, rx_comp_mapping),
3286 sizeof(struct eth_fast_path_rx_cqe) *
3287 NUM_RCQ_BD);
3288
3289 /* SGE ring */
3290 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3291 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3292 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3293 &bnx2x_fp(bp, index, rx_sge_mapping),
3294 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3295 /* RX BD ring */
3296 bnx2x_set_next_page_rx_bd(fp);
3297
3298 /* CQ ring */
3299 bnx2x_set_next_page_rx_cq(fp);
3300
3301 /* BDs */
3302 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3303 if (ring_size < rx_ring_size)
3304 goto alloc_mem_err;
3305 }
3306
3307 return 0;
3308
3309/* handles low memory cases */
3310alloc_mem_err:
3311 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3312 index, ring_size);
3313 /* FW will drop all packets if the queue is not big enough.
3314 * In these cases we disable the queue.
6383c0b3 3315 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
3316 */
3317 if (ring_size < (fp->disable_tpa ?
eb722d7a 3318 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
3319 /* release memory allocated for this queue */
3320 bnx2x_free_fp_mem_at(bp, index);
3321 return -ENOMEM;
3322 }
3323 return 0;
3324}
3325
3326int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3327{
3328 int i;
3329
3330 /**
3331 * 1. Allocate FP for leading - fatal if error
3332 * 2. {CNIC} Allocate FCoE FP - fatal if error
6383c0b3
AE
3333 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3334 * 4. Allocate RSS - fix number of queues if error
b3b83c3f
DK
3335 */
3336
3337 /* leading */
3338 if (bnx2x_alloc_fp_mem_at(bp, 0))
3339 return -ENOMEM;
6383c0b3 3340
b3b83c3f 3341#ifdef BCM_CNIC
8eef2af1
DK
3342 if (!NO_FCOE(bp))
3343 /* FCoE */
3344 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3345 /* we will fail load process instead of mark
3346 * NO_FCOE_FLAG
3347 */
3348 return -ENOMEM;
b3b83c3f 3349#endif
6383c0b3 3350
b3b83c3f
DK
3351 /* RSS */
3352 for_each_nondefault_eth_queue(bp, i)
3353 if (bnx2x_alloc_fp_mem_at(bp, i))
3354 break;
3355
3356 /* handle memory failures */
3357 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3358 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3359
3360 WARN_ON(delta < 0);
3361#ifdef BCM_CNIC
3362 /**
3363 * move non eth FPs next to last eth FP
3364 * must be done in that order
3365 * FCOE_IDX < FWD_IDX < OOO_IDX
3366 */
3367
6383c0b3 3368 /* move FCoE fp even if NO_FCOE_FLAG is on */
b3b83c3f
DK
3369 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3370#endif
3371 bp->num_queues -= delta;
3372 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3373 bp->num_queues + delta, bp->num_queues);
3374 }
3375
3376 return 0;
3377}
d6214d7a 3378
523224a3
DK
3379void bnx2x_free_mem_bp(struct bnx2x *bp)
3380{
3381 kfree(bp->fp);
3382 kfree(bp->msix_table);
3383 kfree(bp->ilt);
3384}
3385
3386int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3387{
3388 struct bnx2x_fastpath *fp;
3389 struct msix_entry *tbl;
3390 struct bnx2x_ilt *ilt;
6383c0b3
AE
3391 int msix_table_size = 0;
3392
3393 /*
3394 * The biggest MSI-X table we might need is the maximum number of
3395 * fast path IGU SBs plus the default SB (for the PF).
3396 */
3397 msix_table_size = bp->igu_sb_cnt + 1;
523224a3 3398
6383c0b3 3399 /* fp array: RSS plus CNIC related L2 queues */
01e23742 3400 fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
6383c0b3 3401 sizeof(*fp), GFP_KERNEL);
523224a3
DK
3402 if (!fp)
3403 goto alloc_err;
3404 bp->fp = fp;
3405
3406 /* msix table */
01e23742 3407 tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
523224a3
DK
3408 if (!tbl)
3409 goto alloc_err;
3410 bp->msix_table = tbl;
3411
3412 /* ilt */
3413 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3414 if (!ilt)
3415 goto alloc_err;
3416 bp->ilt = ilt;
3417
3418 return 0;
3419alloc_err:
3420 bnx2x_free_mem_bp(bp);
3421 return -ENOMEM;
3422
3423}
3424
a9fccec7 3425int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
3426{
3427 struct bnx2x *bp = netdev_priv(dev);
3428
3429 if (unlikely(!netif_running(dev)))
3430 return 0;
3431
3432 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3433 return bnx2x_nic_load(bp, LOAD_NORMAL);
3434}
3435
1ac9e428
YR
3436int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3437{
3438 u32 sel_phy_idx = 0;
3439 if (bp->link_params.num_phys <= 1)
3440 return INT_PHY;
3441
3442 if (bp->link_vars.link_up) {
3443 sel_phy_idx = EXT_PHY1;
3444 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3445 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3446 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3447 sel_phy_idx = EXT_PHY2;
3448 } else {
3449
3450 switch (bnx2x_phy_selection(&bp->link_params)) {
3451 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3452 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3453 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3454 sel_phy_idx = EXT_PHY1;
3455 break;
3456 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3457 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3458 sel_phy_idx = EXT_PHY2;
3459 break;
3460 }
3461 }
3462
3463 return sel_phy_idx;
3464
3465}
3466int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3467{
3468 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3469 /*
3470 * The selected active PHY is always the one after swapping (in case
3471 * PHY swapping is enabled). So when swapping is enabled, we need to
3472 * reverse the configuration
3473 */
3474
3475 if (bp->link_params.multi_phy_config &
3476 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3477 if (sel_phy_idx == EXT_PHY1)
3478 sel_phy_idx = EXT_PHY2;
3479 else if (sel_phy_idx == EXT_PHY2)
3480 sel_phy_idx = EXT_PHY1;
3481 }
3482 return LINK_CONFIG_IDX(sel_phy_idx);
3483}
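/* Illustrative note (added for clarity): if PHY swapping is enabled and the
 * currently active PHY is EXT_PHY1, the link configuration actually lives at
 * the EXT_PHY2 index, hence the swap above before LINK_CONFIG_IDX() is
 * applied.
 */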

#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif
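
/* Sketch (added; based on an assumption about the helper): HILO_U64() is
 * assumed to combine the two 32-bit shmem words into one 64-bit WWN,
 * conceptually:
 *
 *	u64 wwn = ((u64)hi << 32) | lo;
 *
 * so a hypothetical node name stored as hi = 0x20000025, lo = 0xb5000001
 * would be reported to the FCoE stack as 0x20000025b5000001.
 */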

/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	bp->gro_check = bnx2x_need_gro_check(new_mtu);

	return bnx2x_reload_if_running(dev);
}
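
/* Worked example (added note; the exact constant values are assumptions
 * taken from bnx2x.h / if_ether.h): with ETH_MIN_PACKET_SIZE = 60 and
 * ETH_HLEN = 14 the smallest accepted MTU is 60 - 14 = 46, and with
 * ETH_MAX_JUMBO_PACKET_SIZE = 9600 the largest is 9600. So, for instance,
 * "ip link set dev ethX mtu 9000" is accepted, while mtu 20 or mtu 9601
 * is rejected here with -EINVAL.
 */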

netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}
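
/* Added note: the networking core calls .ndo_fix_features (this function)
 * from netdev_update_features() before committing a new feature set, so a
 * request to enable LRO/GRO while Rx checksum offload is off (or while TPA
 * is administratively disabled) is expected to be silently dropped rather
 * than failing the ethtool call.
 */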

int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	if (flags ^ bp->flags) {
		bp->flags = flags;
		bnx2x_reload = true;
	}

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
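
/* Usage note (added): NETIF_F_LOOPBACK is exposed through ethtool, so a
 * command along the lines of "ethtool -K ethX loopback on" switches the
 * link into BMAC loopback here and, whenever any of the tracked flags
 * actually changes, triggers a reload via bnx2x_reload_if_running().
 */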

void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();

	/* This allows the netif to be shut down gracefully before resetting */
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
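
/* Added note (describes the expected, not verified, consumer): the memory
 * barriers around set_bit() pair with the ones in the sp_rtnl worker,
 * which is expected to test_and_clear BNX2X_SP_RTNL_TX_TIMEOUT under
 * rtnl_lock and recover the device from there; the timeout handler itself
 * runs in a context where an unload/load cannot be performed directly.
 */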

int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
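
/* Illustrative sketch (added; not the driver's real table, which is
 * registered elsewhere in the bnx2x sources, presumably bnx2x_main.c):
 * how these legacy PM entry points would typically be wired into a
 * struct pci_driver.
 */
static struct pci_driver bnx2x_pm_example_driver = {
	.name		= "bnx2x",
	.suspend	= bnx2x_suspend,
	.resume		= bnx2x_resume,
	/* .id_table, .probe, .remove etc. omitted from this sketch */
};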

void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u8 fw_sb_id, u8 sb_index,
					   u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u16 flags = REG_RD16(bp, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
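
/* Added note: the read-modify-write above only touches the
 * HC_INDEX_DATA_HC_ENABLED bit of the 16-bit flags word, so any other
 * per-index flags kept in the status block data are preserved when
 * coalescing is enabled or disabled at runtime.
 */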

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
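
/* Worked example (added; BNX2X_BTR is assumed to be 4 here): a request of
 * usec = 96 programs ticks = 96 / 4 = 24 into the status block timeout,
 * while a request of usec = 0 leaves ticks = 0 and also forces
 * disable = 1, i.e. interrupt coalescing is switched off for that status
 * block index.
 */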