bnx2x: Adjust BCM84833 to BCM578xx
drivers/net/bnx2x/bnx2x_cmn.c
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
 3 * Copyright (c) 2007-2011 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
 18#include <linux/etherdevice.h>
 19#include <linux/if_vlan.h>
 20#include <linux/interrupt.h>
 21#include <linux/ip.h>
 22#include <net/ipv6.h>
 23#include <net/ip6_checksum.h>
 24#include <linux/firmware.h>
 25#include <linux/prefetch.h>
 26#include "bnx2x_cmn.h"
 27#include "bnx2x_init.h"
 28#include "bnx2x_sp.h"
 29
 30
 31
32/**
33 * bnx2x_bz_fp - zero content of the fastpath structure.
34 *
35 * @bp: driver handle
36 * @index: fastpath index to be zeroed
37 *
 38 * Makes sure the contents of bp->fp[index].napi are kept
39 * intact.
40 */
41static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
42{
43 struct bnx2x_fastpath *fp = &bp->fp[index];
44 struct napi_struct orig_napi = fp->napi;
45 /* bzero bnx2x_fastpath contents */
46 memset(fp, 0, sizeof(*fp));
47
48 /* Restore the NAPI object as it has been already initialized */
49 fp->napi = orig_napi;
50}
51
52/**
53 * bnx2x_move_fp - move content of the fastpath structure.
54 *
55 * @bp: driver handle
56 * @from: source FP index
57 * @to: destination FP index
58 *
 59 * Makes sure the contents of bp->fp[to].napi are kept
60 * intact.
61 */
62static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
63{
64 struct bnx2x_fastpath *from_fp = &bp->fp[from];
65 struct bnx2x_fastpath *to_fp = &bp->fp[to];
66 struct napi_struct orig_napi = to_fp->napi;
67 /* Move bnx2x_fastpath contents */
68 memcpy(to_fp, from_fp, sizeof(*to_fp));
69 to_fp->index = to;
70
71 /* Restore the NAPI object as it has been already initialized */
72 to_fp->napi = orig_napi;
73}
74
75int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
76
77/* free skb in the packet ring at pos idx
78 * return idx of last bd freed
79 */
80static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
81 u16 idx)
82{
83 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
84 struct eth_tx_start_bd *tx_start_bd;
85 struct eth_tx_bd *tx_data_bd;
86 struct sk_buff *skb = tx_buf->skb;
87 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
88 int nbd;
89
 90 /* prefetch skb end pointer to speed up dev_kfree_skb() */
91 prefetch(&skb->end);
92
93 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
94 fp->index, idx, tx_buf, skb);
95
96 /* unmap first bd */
97 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
98 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
99 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 100 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 101
 102
103 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
104#ifdef BNX2X_STOP_ON_ERROR
105 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
106 BNX2X_ERR("BAD nbd!\n");
107 bnx2x_panic();
108 }
109#endif
110 new_cons = nbd + tx_buf->first_bd;
111
112 /* Get the next bd */
113 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
114
115 /* Skip a parse bd... */
116 --nbd;
117 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
118
119 /* ...and the TSO split header bd since they have no mapping */
120 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
121 --nbd;
122 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
123 }
124
125 /* now free frags */
126 while (nbd > 0) {
127
128 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
129 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
130 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
131 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
132 if (--nbd)
133 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
134 }
135
136 /* release skb */
137 WARN_ON(!skb);
40955532 138 dev_kfree_skb_any(skb);
139 tx_buf->first_bd = 0;
140 tx_buf->skb = NULL;
141
142 return new_cons;
143}
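
/* Illustrative BD layout for a typical TSO packet freed above (a sketch,
 * not an exact dump):
 *
 *   start BD -> parse BD -> [optional TSO split header BD] -> data BD(s)
 *
 * nbd taken from the start BD counts all of these; the parse BD and the
 * split header BD carry no DMA mapping, which is why only the start BD is
 * dma_unmap_single()'d and the remaining data BDs are dma_unmap_page()'d.
 */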
144
145int bnx2x_tx_int(struct bnx2x_fastpath *fp)
146{
147 struct bnx2x *bp = fp->bp;
148 struct netdev_queue *txq;
149 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
150
151#ifdef BNX2X_STOP_ON_ERROR
152 if (unlikely(bp->panic))
153 return -1;
154#endif
155
156 txq = netdev_get_tx_queue(bp->dev, fp->index);
157 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
158 sw_cons = fp->tx_pkt_cons;
159
160 while (sw_cons != hw_cons) {
161 u16 pkt_cons;
162
163 pkt_cons = TX_BD(sw_cons);
164
165 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
166 " pkt_cons %u\n",
167 fp->index, hw_cons, sw_cons, pkt_cons);
9f6c9258 168
169 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
170 sw_cons++;
171 }
172
173 fp->tx_pkt_cons = sw_cons;
174 fp->tx_bd_cons = bd_cons;
175
176 /* Need to make the tx_bd_cons update visible to start_xmit()
177 * before checking for netif_tx_queue_stopped(). Without the
178 * memory barrier, there is a small possibility that
179 * start_xmit() will miss it and cause the queue to be stopped
180 * forever.
181 * On the other hand we need an rmb() here to ensure the proper
182 * ordering of bit testing in the following
183 * netif_tx_queue_stopped(txq) call.
184 */
185 smp_mb();
186
187 if (unlikely(netif_tx_queue_stopped(txq))) {
 188 /* Taking tx_lock() is needed to prevent re-enabling the queue
 189 * while it's empty. This could happen if rx_action() gets
190 * suspended in bnx2x_tx_int() after the condition before
191 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
192 *
193 * stops the queue->sees fresh tx_bd_cons->releases the queue->
194 * sends some packets consuming the whole queue again->
195 * stops the queue
196 */
197
198 __netif_tx_lock(txq, smp_processor_id());
199
200 if ((netif_tx_queue_stopped(txq)) &&
201 (bp->state == BNX2X_STATE_OPEN) &&
202 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
203 netif_tx_wake_queue(txq);
204
205 __netif_tx_unlock(txq);
206 }
207 return 0;
208}
209
210static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
211 u16 idx)
212{
213 u16 last_max = fp->last_max_sge;
214
215 if (SUB_S16(idx, last_max) > 0)
216 fp->last_max_sge = idx;
217}
218
219static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
220 struct eth_fast_path_rx_cqe *fp_cqe)
221{
222 struct bnx2x *bp = fp->bp;
223 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
224 le16_to_cpu(fp_cqe->len_on_bd)) >>
225 SGE_PAGE_SHIFT;
226 u16 last_max, last_elem, first_elem;
227 u16 delta = 0;
228 u16 i;
229
230 if (!sge_len)
231 return;
232
233 /* First mark all used pages */
234 for (i = 0; i < sge_len; i++)
619c5cb6 235 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
523224a3 236 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
237
238 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
523224a3 239 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
240
241 /* Here we assume that the last SGE index is the biggest */
242 prefetch((void *)(fp->sge_mask));
243 bnx2x_update_last_max_sge(fp,
244 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
245
246 last_max = RX_SGE(fp->last_max_sge);
247 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
248 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
249
250 /* If ring is not full */
251 if (last_elem + 1 != first_elem)
252 last_elem++;
253
254 /* Now update the prod */
255 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
256 if (likely(fp->sge_mask[i]))
257 break;
258
259 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
260 delta += BIT_VEC64_ELEM_SZ;
261 }
262
263 if (delta > 0) {
264 fp->rx_sge_prod += delta;
265 /* clear page-end entries */
266 bnx2x_clear_sge_mask_next_elems(fp);
267 }
268
269 DP(NETIF_MSG_RX_STATUS,
270 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
271 fp->last_max_sge, fp->rx_sge_prod);
272}
273
274static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
275 struct sk_buff *skb, u16 cons, u16 prod,
276 struct eth_fast_path_rx_cqe *cqe)
277{
278 struct bnx2x *bp = fp->bp;
279 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
280 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
281 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
282 dma_addr_t mapping;
283 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
284 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 285
286 /* print error if current state != stop */
287 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
288 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
289
290 /* Try to map an empty skb from the aggregation info */
291 mapping = dma_map_single(&bp->pdev->dev,
292 first_buf->skb->data,
293 fp->rx_buf_size, DMA_FROM_DEVICE);
294 /*
295 * ...if it fails - move the skb from the consumer to the producer
296 * and set the current aggregation state as ERROR to drop it
297 * when TPA_STOP arrives.
298 */
299
300 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
301 /* Move the BD from the consumer to the producer */
302 bnx2x_reuse_rx_skb(fp, cons, prod);
303 tpa_info->tpa_state = BNX2X_TPA_ERROR;
304 return;
305 }
9f6c9258 306
307 /* move empty skb from pool to prod */
308 prod_rx_buf->skb = first_buf->skb;
309 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
310 /* point prod_bd to new skb */
311 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
312 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
313
314 /* move partial skb from cons to pool (don't unmap yet) */
315 *first_buf = *cons_rx_buf;
316
317 /* mark bin state as START */
318 tpa_info->parsing_flags =
319 le16_to_cpu(cqe->pars_flags.flags);
320 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
321 tpa_info->tpa_state = BNX2X_TPA_START;
322 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
323 tpa_info->placement_offset = cqe->placement_offset;
324
325#ifdef BNX2X_STOP_ON_ERROR
326 fp->tpa_queue_used |= (1 << queue);
327#ifdef _ASM_GENERIC_INT_L64_H
328 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
329#else
330 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
331#endif
332 fp->tpa_queue_used);
333#endif
334}
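
/* Sketch of the buffer rotation done above on the non-error path
 * (illustrative only):
 *
 *   pool skb (tpa_info->first_buf)  -->  producer ring entry (new mapping)
 *   consumer ring entry (packet)    -->  pool, left mapped for now
 *
 * so the aggregation keeps growing in the skb that sat at the consumer
 * index, while the ring gets an empty skb back at the producer index.
 */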
335
336/* Timestamp option length allowed for TPA aggregation:
337 *
338 * nop nop kind length echo val
339 */
340#define TPA_TSTAMP_OPT_LEN 12
341/**
 342 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 343 *
 344 * @bp: driver handle
 345 * @parsing_flags: parsing flags from the START CQE
 346 * @len_on_bd: total length of the first packet of the
 347 * aggregation.
 348 *
 349 * Returns the approximate value of the MSS for this aggregation,
 350 * calculated from its first packet.
 351 */
352static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
353 u16 len_on_bd)
354{
 355 /*
 356 * A TPA aggregation won't have IP options, TCP options other
 357 * than the timestamp option, or IPv6 extension headers.
 358 */
359 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
360
361 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
362 PRS_FLAG_OVERETH_IPV6)
363 hdrs_len += sizeof(struct ipv6hdr);
364 else /* IPv4 */
365 hdrs_len += sizeof(struct iphdr);
366
367
 368 /* Check if there was a TCP timestamp option; if there was, it is
 369 * always 12 bytes long: nop nop kind length echo val.
 370 *
 371 * Otherwise the FW would have closed the aggregation.
372 */
373 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
374 hdrs_len += TPA_TSTAMP_OPT_LEN;
375
376 return len_on_bd - hdrs_len;
377}
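
/* Worked example with made-up numbers: for an IPv4 aggregation whose first
 * packet carries TCP timestamps and has len_on_bd = 1514,
 *
 *   hdrs_len = ETH_HLEN (14) + tcphdr (20) + iphdr (20) +
 *              TPA_TSTAMP_OPT_LEN (12) = 66
 *   mss      = 1514 - 66 = 1448
 *
 * For IPv6 the iphdr term is replaced by sizeof(struct ipv6hdr) = 40.
 */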
378
9f6c9258 379static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
380 u16 queue, struct sk_buff *skb,
381 struct eth_end_agg_rx_cqe *cqe,
382 u16 cqe_idx)
383{
384 struct sw_rx_page *rx_pg, old_rx_pg;
385 u32 i, frag_len, frag_size, pages;
386 int err;
387 int j;
388 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
389 u16 len_on_bd = tpa_info->len_on_bd;
9f6c9258 390
619c5cb6 391 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
392 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
393
394 /* This is needed in order to enable forwarding support */
395 if (frag_size)
396 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
397 tpa_info->parsing_flags, len_on_bd);
398
399#ifdef BNX2X_STOP_ON_ERROR
400 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
401 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
402 pages, cqe_idx);
619c5cb6 403 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
404 bnx2x_panic();
405 return -EINVAL;
406 }
407#endif
408
409 /* Run through the SGL and compose the fragmented skb */
410 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 411 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
412
413 /* FW gives the indices of the SGE as if the ring is an array
414 (meaning that "next" element will consume 2 indices) */
415 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
416 rx_pg = &fp->rx_page_ring[sge_idx];
417 old_rx_pg = *rx_pg;
418
419 /* If we fail to allocate a substitute page, we simply stop
420 where we are and drop the whole packet */
421 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
422 if (unlikely(err)) {
423 fp->eth_q_stats.rx_skb_alloc_failed++;
424 return err;
425 }
426
 427 /* Unmap the page as we are going to pass it to the stack */
428 dma_unmap_page(&bp->pdev->dev,
429 dma_unmap_addr(&old_rx_pg, mapping),
430 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
431
432 /* Add one frag and update the appropriate fields in the skb */
433 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
434
435 skb->data_len += frag_len;
436 skb->truesize += frag_len;
437 skb->len += frag_len;
438
439 frag_size -= frag_len;
440 }
441
442 return 0;
443}
444
445static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
619c5cb6 446 u16 queue, struct eth_end_agg_rx_cqe *cqe,
447 u16 cqe_idx)
448{
449 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
450 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
451 u8 pad = tpa_info->placement_offset;
452 u16 len = tpa_info->len_on_bd;
453 struct sk_buff *skb = rx_buf->skb;
454 /* alloc new skb */
455 struct sk_buff *new_skb;
456 u8 old_tpa_state = tpa_info->tpa_state;
457
458 tpa_info->tpa_state = BNX2X_TPA_STOP;
459
 460 /* If there was an error during the handling of the TPA_START -
461 * drop this aggregation.
462 */
463 if (old_tpa_state == BNX2X_TPA_ERROR)
464 goto drop;
465
466 /* Try to allocate the new skb */
467 new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
468
469 /* Unmap skb in the pool anyway, as we are going to change
470 pool entry status to BNX2X_TPA_STOP even if new skb allocation
471 fails. */
472 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 473 fp->rx_buf_size, DMA_FROM_DEVICE);
474
475 if (likely(new_skb)) {
9f6c9258 476 prefetch(skb);
217de5aa 477 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
478
479#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 480 if (pad + len > fp->rx_buf_size) {
481 BNX2X_ERR("skb_put is about to fail... "
482 "pad %d len %d rx_buf_size %d\n",
a8c94b91 483 pad, len, fp->rx_buf_size);
484 bnx2x_panic();
485 return;
486 }
487#endif
488
489 skb_reserve(skb, pad);
490 skb_put(skb, len);
491
492 skb->protocol = eth_type_trans(skb, bp->dev);
493 skb->ip_summed = CHECKSUM_UNNECESSARY;
494
495 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
496 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
497 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
9bcc0893 498 napi_gro_receive(&fp->napi, skb);
499 } else {
500 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
501 " - dropping packet!\n");
40955532 502 dev_kfree_skb_any(skb);
503 }
504
505
506 /* put new skb in bin */
619c5cb6 507 rx_buf->skb = new_skb;
9f6c9258 508
619c5cb6 509 return;
510 }
511
512drop:
513 /* drop the packet and keep the buffer in the bin */
514 DP(NETIF_MSG_RX_STATUS,
515 "Failed to allocate or map a new skb - dropping packet!\n");
516 fp->eth_q_stats.rx_skb_alloc_failed++;
517}
518
519/* Set Toeplitz hash value in the skb using the value from the
520 * CQE (calculated by HW).
521 */
522static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
523 struct sk_buff *skb)
524{
525 /* Set Toeplitz hash from CQE */
526 if ((bp->dev->features & NETIF_F_RXHASH) &&
527 (cqe->fast_path_cqe.status_flags &
528 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
529 skb->rxhash =
530 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
531}
532
533int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
534{
535 struct bnx2x *bp = fp->bp;
536 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
537 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
538 int rx_pkt = 0;
539
540#ifdef BNX2X_STOP_ON_ERROR
541 if (unlikely(bp->panic))
542 return 0;
543#endif
544
545 /* CQ "next element" is of the size of the regular element,
546 that's why it's ok here */
547 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
548 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
549 hw_comp_cons++;
550
551 bd_cons = fp->rx_bd_cons;
552 bd_prod = fp->rx_bd_prod;
553 bd_prod_fw = bd_prod;
554 sw_comp_cons = fp->rx_comp_cons;
555 sw_comp_prod = fp->rx_comp_prod;
556
557 /* Memory barrier necessary as speculative reads of the rx
558 * buffer can be ahead of the index in the status block
559 */
560 rmb();
561
562 DP(NETIF_MSG_RX_STATUS,
563 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
564 fp->index, hw_comp_cons, sw_comp_cons);
565
566 while (sw_comp_cons != hw_comp_cons) {
567 struct sw_rx_bd *rx_buf = NULL;
568 struct sk_buff *skb;
569 union eth_rx_cqe *cqe;
619c5cb6 570 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258 571 u8 cqe_fp_flags;
619c5cb6 572 enum eth_rx_cqe_type cqe_fp_type;
9f6c9258
DK
573 u16 len, pad;
574
619c5cb6
VZ
575#ifdef BNX2X_STOP_ON_ERROR
576 if (unlikely(bp->panic))
577 return 0;
578#endif
579
9f6c9258
DK
580 comp_ring_cons = RCQ_BD(sw_comp_cons);
581 bd_prod = RX_BD(bd_prod);
582 bd_cons = RX_BD(bd_cons);
583
584 /* Prefetch the page containing the BD descriptor
585 at producer's index. It will be needed when new skb is
586 allocated */
587 prefetch((void *)(PAGE_ALIGN((unsigned long)
588 (&fp->rx_desc_ring[bd_prod])) -
589 PAGE_SIZE + 1));
590
591 cqe = &fp->rx_comp_ring[comp_ring_cons];
619c5cb6
VZ
592 cqe_fp = &cqe->fast_path_cqe;
593 cqe_fp_flags = cqe_fp->type_error_flags;
594 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258
DK
595
596 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
597 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
598 cqe_fp_flags, cqe_fp->status_flags,
599 le32_to_cpu(cqe_fp->rss_hash_result),
600 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
9f6c9258
DK
601
602 /* is this a slowpath msg? */
619c5cb6 603 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
604 bnx2x_sp_event(fp, cqe);
605 goto next_cqe;
606
607 /* this is an rx packet */
608 } else {
609 rx_buf = &fp->rx_buf_ring[bd_cons];
610 skb = rx_buf->skb;
611 prefetch(skb);
9f6c9258 612
619c5cb6
VZ
613 if (!CQE_TYPE_FAST(cqe_fp_type)) {
614#ifdef BNX2X_STOP_ON_ERROR
615 /* sanity check */
616 if (fp->disable_tpa &&
617 (CQE_TYPE_START(cqe_fp_type) ||
618 CQE_TYPE_STOP(cqe_fp_type)))
619 BNX2X_ERR("START/STOP packet while "
620 "disable_tpa type %x\n",
621 CQE_TYPE(cqe_fp_type));
622#endif
9f6c9258 623
619c5cb6
VZ
624 if (CQE_TYPE_START(cqe_fp_type)) {
625 u16 queue = cqe_fp->queue_index;
9f6c9258
DK
626 DP(NETIF_MSG_RX_STATUS,
627 "calling tpa_start on queue %d\n",
628 queue);
629
630 bnx2x_tpa_start(fp, queue, skb,
619c5cb6
VZ
631 bd_cons, bd_prod,
632 cqe_fp);
9f6c9258 633
619c5cb6 634 /* Set Toeplitz hash for LRO skb */
9f6c9258
DK
635 bnx2x_set_skb_rxhash(bp, cqe, skb);
636
637 goto next_rx;
619c5cb6
VZ
638
639 } else {
640 u16 queue =
641 cqe->end_agg_cqe.queue_index;
9f6c9258
DK
642 DP(NETIF_MSG_RX_STATUS,
643 "calling tpa_stop on queue %d\n",
644 queue);
645
619c5cb6
VZ
646 bnx2x_tpa_stop(bp, fp, queue,
647 &cqe->end_agg_cqe,
648 comp_ring_cons);
9f6c9258
DK
649#ifdef BNX2X_STOP_ON_ERROR
650 if (bp->panic)
651 return 0;
652#endif
653
619c5cb6 654 bnx2x_update_sge_prod(fp, cqe_fp);
9f6c9258
DK
655 goto next_cqe;
656 }
657 }
619c5cb6
VZ
658 /* non TPA */
659 len = le16_to_cpu(cqe_fp->pkt_len);
660 pad = cqe_fp->placement_offset;
9f6c9258
DK
661 dma_sync_single_for_device(&bp->pdev->dev,
662 dma_unmap_addr(rx_buf, mapping),
619c5cb6
VZ
663 pad + RX_COPY_THRESH,
664 DMA_FROM_DEVICE);
217de5aa 665 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
9f6c9258
DK
666
667 /* is this an error packet? */
668 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
669 DP(NETIF_MSG_RX_ERR,
670 "ERROR flags %x rx packet %u\n",
671 cqe_fp_flags, sw_comp_cons);
672 fp->eth_q_stats.rx_err_discard_pkt++;
673 goto reuse_rx;
674 }
675
676 /* Since we don't have a jumbo ring
677 * copy small packets if mtu > 1500
678 */
679 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
680 (len <= RX_COPY_THRESH)) {
681 struct sk_buff *new_skb;
682
619c5cb6 683 new_skb = netdev_alloc_skb(bp->dev, len + pad);
9f6c9258
DK
684 if (new_skb == NULL) {
685 DP(NETIF_MSG_RX_ERR,
686 "ERROR packet dropped "
687 "because of alloc failure\n");
688 fp->eth_q_stats.rx_skb_alloc_failed++;
689 goto reuse_rx;
690 }
691
692 /* aligned copy */
693 skb_copy_from_linear_data_offset(skb, pad,
694 new_skb->data + pad, len);
695 skb_reserve(new_skb, pad);
696 skb_put(new_skb, len);
697
749a8503 698 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
9f6c9258
DK
699
700 skb = new_skb;
701
702 } else
703 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
704 dma_unmap_single(&bp->pdev->dev,
705 dma_unmap_addr(rx_buf, mapping),
a8c94b91 706 fp->rx_buf_size,
9f6c9258
DK
707 DMA_FROM_DEVICE);
708 skb_reserve(skb, pad);
709 skb_put(skb, len);
710
711 } else {
712 DP(NETIF_MSG_RX_ERR,
713 "ERROR packet dropped because "
714 "of alloc failure\n");
715 fp->eth_q_stats.rx_skb_alloc_failed++;
716reuse_rx:
749a8503 717 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
718 goto next_rx;
719 }
720
721 skb->protocol = eth_type_trans(skb, bp->dev);
722
 723 /* Set Toeplitz hash for a non-LRO skb */
724 bnx2x_set_skb_rxhash(bp, cqe, skb);
725
bc8acf2c 726 skb_checksum_none_assert(skb);
f85582f8 727
66371c44 728 if (bp->dev->features & NETIF_F_RXCSUM) {
619c5cb6 729
730 if (likely(BNX2X_RX_CSUM_OK(cqe)))
731 skb->ip_summed = CHECKSUM_UNNECESSARY;
732 else
733 fp->eth_q_stats.hw_csum_err++;
734 }
735 }
736
737 skb_record_rx_queue(skb, fp->index);
738
739 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
740 PARSING_FLAGS_VLAN)
9bcc0893 741 __vlan_hwaccel_put_tag(skb,
619c5cb6 742 le16_to_cpu(cqe_fp->vlan_tag));
9bcc0893 743 napi_gro_receive(&fp->napi, skb);
744
745
746next_rx:
747 rx_buf->skb = NULL;
748
749 bd_cons = NEXT_RX_IDX(bd_cons);
750 bd_prod = NEXT_RX_IDX(bd_prod);
751 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
752 rx_pkt++;
753next_cqe:
754 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
755 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
756
757 if (rx_pkt == budget)
758 break;
759 } /* while */
760
761 fp->rx_bd_cons = bd_cons;
762 fp->rx_bd_prod = bd_prod_fw;
763 fp->rx_comp_cons = sw_comp_cons;
764 fp->rx_comp_prod = sw_comp_prod;
765
766 /* Update producers */
767 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
768 fp->rx_sge_prod);
769
770 fp->rx_pkt += rx_pkt;
771 fp->rx_calls++;
772
773 return rx_pkt;
774}
775
776static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
777{
778 struct bnx2x_fastpath *fp = fp_cookie;
779 struct bnx2x *bp = fp->bp;
780
523224a3
DK
781 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
782 "[fp %d fw_sd %d igusb %d]\n",
783 fp->index, fp->fw_sb_id, fp->igu_sb_id);
784 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
785
786#ifdef BNX2X_STOP_ON_ERROR
787 if (unlikely(bp->panic))
788 return IRQ_HANDLED;
789#endif
790
791 /* Handle Rx and Tx according to MSI-X vector */
792 prefetch(fp->rx_cons_sb);
793 prefetch(fp->tx_cons_sb);
523224a3 794 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
795 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
796
797 return IRQ_HANDLED;
798}
799
9f6c9258
DK
800/* HW Lock for shared dual port PHYs */
801void bnx2x_acquire_phy_lock(struct bnx2x *bp)
802{
803 mutex_lock(&bp->port.phy_mutex);
804
805 if (bp->port.need_hw_lock)
806 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
807}
808
809void bnx2x_release_phy_lock(struct bnx2x *bp)
810{
811 if (bp->port.need_hw_lock)
812 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
813
814 mutex_unlock(&bp->port.phy_mutex);
815}
816
 817/* calculates MF speed according to the current line speed and MF configuration */
818u16 bnx2x_get_mf_speed(struct bnx2x *bp)
819{
820 u16 line_speed = bp->link_vars.line_speed;
821 if (IS_MF(bp)) {
822 u16 maxCfg = bnx2x_extract_max_cfg(bp,
823 bp->mf_config[BP_VN(bp)]);
824
825 /* Calculate the current MAX line speed limit for the MF
826 * devices
0793f83f 827 */
828 if (IS_MF_SI(bp))
829 line_speed = (line_speed * maxCfg) / 100;
830 else { /* SD mode */
831 u16 vn_max_rate = maxCfg * 100;
832
833 if (vn_max_rate < line_speed)
834 line_speed = vn_max_rate;
faa6fcbb 835 }
836 }
837
838 return line_speed;
839}
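
/* Example with hypothetical values: on a 10000 Mbps link with maxCfg = 25,
 * SI mode reports 10000 * 25 / 100 = 2500 Mbps (maxCfg is a percentage
 * there), while SD mode caps the speed at 25 * 100 = 2500 Mbps (maxCfg in
 * units of 100 Mbps).
 */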
840
841/**
842 * bnx2x_fill_report_data - fill link report data to report
843 *
844 * @bp: driver handle
845 * @data: link state to update
846 *
 847 * It uses non-atomic bit operations because it is called under the mutex.
848 */
849static inline void bnx2x_fill_report_data(struct bnx2x *bp,
850 struct bnx2x_link_report_data *data)
851{
852 u16 line_speed = bnx2x_get_mf_speed(bp);
853
854 memset(data, 0, sizeof(*data));
855
 856 /* Fill the report data: effective line speed */
857 data->line_speed = line_speed;
858
859 /* Link is down */
860 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
861 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
862 &data->link_report_flags);
863
864 /* Full DUPLEX */
865 if (bp->link_vars.duplex == DUPLEX_FULL)
866 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
867
868 /* Rx Flow Control is ON */
869 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
870 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
871
872 /* Tx Flow Control is ON */
873 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
874 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
875}
876
877/**
878 * bnx2x_link_report - report link status to OS.
879 *
880 * @bp: driver handle
881 *
882 * Calls the __bnx2x_link_report() under the same locking scheme
883 * as a link/PHY state managing code to ensure a consistent link
884 * reporting.
885 */
886
887void bnx2x_link_report(struct bnx2x *bp)
888{
889 bnx2x_acquire_phy_lock(bp);
890 __bnx2x_link_report(bp);
891 bnx2x_release_phy_lock(bp);
892}
9f6c9258 893
894/**
895 * __bnx2x_link_report - report link status to OS.
896 *
897 * @bp: driver handle
898 *
 899 * Non-atomic implementation.
900 * Should be called under the phy_lock.
901 */
902void __bnx2x_link_report(struct bnx2x *bp)
903{
904 struct bnx2x_link_report_data cur_data;
9f6c9258 905
906 /* reread mf_cfg */
907 if (!CHIP_IS_E1(bp))
908 bnx2x_read_mf_cfg(bp);
909
910 /* Read the current link report info */
911 bnx2x_fill_report_data(bp, &cur_data);
912
913 /* Don't report link down or exactly the same link status twice */
914 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
915 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
916 &bp->last_reported_link.link_report_flags) &&
917 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
918 &cur_data.link_report_flags)))
919 return;
920
921 bp->link_cnt++;
9f6c9258 922
923 /* We are going to report a new link parameters now -
924 * remember the current data for the next time.
925 */
926 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 927
928 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
929 &cur_data.link_report_flags)) {
930 netif_carrier_off(bp->dev);
931 netdev_err(bp->dev, "NIC Link is Down\n");
932 return;
933 } else {
934 netif_carrier_on(bp->dev);
935 netdev_info(bp->dev, "NIC Link is Up, ");
936 pr_cont("%d Mbps ", cur_data.line_speed);
9f6c9258 937
938 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
939 &cur_data.link_report_flags))
940 pr_cont("full duplex");
941 else
942 pr_cont("half duplex");
943
944 /* Handle the FC at the end so that only these flags would be
945 * possibly set. This way we may easily check if there is no FC
946 * enabled.
947 */
948 if (cur_data.link_report_flags) {
949 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
950 &cur_data.link_report_flags)) {
9f6c9258 951 pr_cont(", receive ");
952 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
953 &cur_data.link_report_flags))
954 pr_cont("& transmit ");
955 } else {
956 pr_cont(", transmit ");
957 }
958 pr_cont("flow control ON");
959 }
960 pr_cont("\n");
961 }
962}
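
/* With the pr_cont() chain above, a 10G full-duplex link with both flow
 * control directions enabled would be logged roughly as (example output):
 *
 *   NIC Link is Up, 10000 Mbps full duplex, receive & transmit flow control ON
 *
 * the exact speed and flags depend on link_vars and the MF configuration.
 */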
963
964void bnx2x_init_rx_rings(struct bnx2x *bp)
965{
966 int func = BP_FUNC(bp);
967 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
619c5cb6 968 ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
523224a3 969 u16 ring_prod;
9f6c9258 970 int i, j;
25141580 971
b3b83c3f 972 /* Allocate TPA resources */
ec6ba945 973 for_each_rx_queue(bp, j) {
523224a3 974 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 975
976 DP(NETIF_MSG_IFUP,
977 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
978
523224a3 979 if (!fp->disable_tpa) {
 980 /* Fill the per-aggregation pool */
9f6c9258 981 for (i = 0; i < max_agg_queues; i++) {
982 struct bnx2x_agg_info *tpa_info =
983 &fp->tpa_info[i];
984 struct sw_rx_bd *first_buf =
985 &tpa_info->first_buf;
986
987 first_buf->skb = netdev_alloc_skb(bp->dev,
988 fp->rx_buf_size);
989 if (!first_buf->skb) {
990 BNX2X_ERR("Failed to allocate TPA "
991 "skb pool for queue[%d] - "
992 "disabling TPA on this "
993 "queue!\n", j);
994 bnx2x_free_tpa_pool(bp, fp, i);
995 fp->disable_tpa = 1;
996 break;
997 }
998 dma_unmap_addr_set(first_buf, mapping, 0);
999 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1000 }
523224a3
DK
1001
1002 /* "next page" elements initialization */
1003 bnx2x_set_next_page_sgl(fp);
1004
1005 /* set SGEs bit mask */
1006 bnx2x_init_sge_ring_bit_mask(fp);
1007
1008 /* Allocate SGEs and initialize the ring elements */
1009 for (i = 0, ring_prod = 0;
1010 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1011
1012 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1013 BNX2X_ERR("was only able to allocate "
1014 "%d rx sges\n", i);
619c5cb6
VZ
1015 BNX2X_ERR("disabling TPA for "
1016 "queue[%d]\n", j);
523224a3 1017 /* Cleanup already allocated elements */
619c5cb6
VZ
1018 bnx2x_free_rx_sge_range(bp, fp,
1019 ring_prod);
1020 bnx2x_free_tpa_pool(bp, fp,
1021 max_agg_queues);
523224a3
DK
1022 fp->disable_tpa = 1;
1023 ring_prod = 0;
1024 break;
1025 }
1026 ring_prod = NEXT_SGE_IDX(ring_prod);
1027 }
1028
1029 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1030 }
1031 }
1032
ec6ba945 1033 for_each_rx_queue(bp, j) {
9f6c9258
DK
1034 struct bnx2x_fastpath *fp = &bp->fp[j];
1035
1036 fp->rx_bd_cons = 0;
9f6c9258 1037
b3b83c3f
DK
1038 /* Activate BD ring */
1039 /* Warning!
1040 * this will generate an interrupt (to the TSTORM)
1041 * must only be done after chip is initialized
1042 */
1043 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1044 fp->rx_sge_prod);
9f6c9258 1045
9f6c9258
DK
1046 if (j != 0)
1047 continue;
1048
619c5cb6 1049 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1050 REG_WR(bp, BAR_USTRORM_INTMEM +
1051 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1052 U64_LO(fp->rx_comp_mapping));
1053 REG_WR(bp, BAR_USTRORM_INTMEM +
1054 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1055 U64_HI(fp->rx_comp_mapping));
1056 }
9f6c9258
DK
1057 }
1058}
f85582f8 1059
9f6c9258
DK
1060static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1061{
1062 int i;
1063
ec6ba945 1064 for_each_tx_queue(bp, i) {
9f6c9258
DK
1065 struct bnx2x_fastpath *fp = &bp->fp[i];
1066
1067 u16 bd_cons = fp->tx_bd_cons;
1068 u16 sw_prod = fp->tx_pkt_prod;
1069 u16 sw_cons = fp->tx_pkt_cons;
1070
1071 while (sw_cons != sw_prod) {
1072 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
1073 sw_cons++;
1074 }
1075 }
1076}
1077
b3b83c3f
DK
1078static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1079{
1080 struct bnx2x *bp = fp->bp;
1081 int i;
1082
1083 /* ring wasn't allocated */
1084 if (fp->rx_buf_ring == NULL)
1085 return;
1086
1087 for (i = 0; i < NUM_RX_BD; i++) {
1088 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1089 struct sk_buff *skb = rx_buf->skb;
1090
1091 if (skb == NULL)
1092 continue;
b3b83c3f
DK
1093 dma_unmap_single(&bp->pdev->dev,
1094 dma_unmap_addr(rx_buf, mapping),
1095 fp->rx_buf_size, DMA_FROM_DEVICE);
1096
1097 rx_buf->skb = NULL;
1098 dev_kfree_skb(skb);
1099 }
1100}
1101
9f6c9258
DK
1102static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1103{
b3b83c3f 1104 int j;
9f6c9258 1105
ec6ba945 1106 for_each_rx_queue(bp, j) {
9f6c9258
DK
1107 struct bnx2x_fastpath *fp = &bp->fp[j];
1108
b3b83c3f 1109 bnx2x_free_rx_bds(fp);
9f6c9258 1110
9f6c9258
DK
1111 if (!fp->disable_tpa)
1112 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1113 ETH_MAX_AGGREGATION_QUEUES_E1 :
619c5cb6 1114 ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
9f6c9258
DK
1115 }
1116}
1117
1118void bnx2x_free_skbs(struct bnx2x *bp)
1119{
1120 bnx2x_free_tx_skbs(bp);
1121 bnx2x_free_rx_skbs(bp);
1122}
1123
e3835b99
DK
1124void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1125{
1126 /* load old values */
1127 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1128
1129 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1130 /* leave all but MAX value */
1131 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1132
1133 /* set new MAX value */
1134 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1135 & FUNC_MF_CFG_MAX_BW_MASK;
1136
1137 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1138 }
1139}
1140
ca92429f
DK
1141/**
1142 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1143 *
1144 * @bp: driver handle
1145 * @nvecs: number of vectors to be released
1146 */
1147static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1148{
ca92429f 1149 int i, offset = 0;
9f6c9258 1150
ca92429f
DK
1151 if (nvecs == offset)
1152 return;
1153 free_irq(bp->msix_table[offset].vector, bp->dev);
9f6c9258 1154 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
ca92429f
DK
1155 bp->msix_table[offset].vector);
1156 offset++;
9f6c9258 1157#ifdef BCM_CNIC
ca92429f
DK
1158 if (nvecs == offset)
1159 return;
9f6c9258
DK
1160 offset++;
1161#endif
ca92429f 1162
ec6ba945 1163 for_each_eth_queue(bp, i) {
ca92429f
DK
1164 if (nvecs == offset)
1165 return;
1166 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1167 "irq\n", i, bp->msix_table[offset].vector);
9f6c9258 1168
ca92429f 1169 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1170 }
1171}
1172
d6214d7a 1173void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1174{
d6214d7a 1175 if (bp->flags & USING_MSIX_FLAG)
ca92429f
DK
1176 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1177 CNIC_CONTEXT_USE + 1);
d6214d7a
DK
1178 else if (bp->flags & USING_MSI_FLAG)
1179 free_irq(bp->pdev->irq, bp->dev);
1180 else
9f6c9258
DK
1181 free_irq(bp->pdev->irq, bp->dev);
1182}
1183
d6214d7a 1184int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1185{
d6214d7a 1186 int msix_vec = 0, i, rc, req_cnt;
9f6c9258 1187
d6214d7a
DK
1188 bp->msix_table[msix_vec].entry = msix_vec;
1189 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1190 bp->msix_table[0].entry);
1191 msix_vec++;
9f6c9258
DK
1192
1193#ifdef BCM_CNIC
d6214d7a
DK
1194 bp->msix_table[msix_vec].entry = msix_vec;
1195 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1196 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1197 msix_vec++;
9f6c9258 1198#endif
ec6ba945 1199 for_each_eth_queue(bp, i) {
d6214d7a 1200 bp->msix_table[msix_vec].entry = msix_vec;
9f6c9258 1201 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
d6214d7a
DK
1202 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1203 msix_vec++;
9f6c9258
DK
1204 }
1205
ec6ba945 1206 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
d6214d7a
DK
1207
1208 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
9f6c9258
DK
1209
1210 /*
1211 * reconfigure number of tx/rx queues according to available
1212 * MSI-X vectors
1213 */
1214 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
 1215 /* how many fewer vectors will we have? */
1216 int diff = req_cnt - rc;
1217
1218 DP(NETIF_MSG_IFUP,
1219 "Trying to use less MSI-X vectors: %d\n", rc);
1220
1221 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1222
1223 if (rc) {
1224 DP(NETIF_MSG_IFUP,
1225 "MSI-X is not attainable rc %d\n", rc);
1226 return rc;
1227 }
1228 /*
1229 * decrease number of queues by number of unallocated entries
1230 */
1231 bp->num_queues -= diff;
1232
1233 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1234 bp->num_queues);
1235 } else if (rc) {
1236 /* fall to INTx if not enough memory */
1237 if (rc == -ENOMEM)
1238 bp->flags |= DISABLE_MSI_FLAG;
1239 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1240 return rc;
1241 }
1242
1243 bp->flags |= USING_MSIX_FLAG;
1244
1245 return 0;
1246}
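
/* The msix_table layout requested above is, as a sketch:
 *
 *   entry 0     - slowpath status block
 *   entry 1     - CNIC (only when BCM_CNIC is defined)
 *   remaining   - one vector per ETH fastpath queue
 *
 * hence req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1. If the
 * PCI core grants fewer vectors (but at least BNX2X_MIN_MSIX_VEC_CNT), the
 * request is retried with that count and bp->num_queues shrinks by the
 * difference.
 */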
1247
1248static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1249{
ca92429f 1250 int i, rc, offset = 0;
9f6c9258 1251
ca92429f
DK
1252 rc = request_irq(bp->msix_table[offset++].vector,
1253 bnx2x_msix_sp_int, 0,
9f6c9258
DK
1254 bp->dev->name, bp->dev);
1255 if (rc) {
1256 BNX2X_ERR("request sp irq failed\n");
1257 return -EBUSY;
1258 }
1259
1260#ifdef BCM_CNIC
1261 offset++;
1262#endif
ec6ba945 1263 for_each_eth_queue(bp, i) {
9f6c9258
DK
1264 struct bnx2x_fastpath *fp = &bp->fp[i];
1265 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1266 bp->dev->name, i);
1267
d6214d7a 1268 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1269 bnx2x_msix_fp_int, 0, fp->name, fp);
1270 if (rc) {
ca92429f
DK
1271 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1272 bp->msix_table[offset].vector, rc);
1273 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1274 return -EBUSY;
1275 }
1276
d6214d7a 1277 offset++;
9f6c9258
DK
1278 }
1279
ec6ba945 1280 i = BNX2X_NUM_ETH_QUEUES(bp);
d6214d7a 1281 offset = 1 + CNIC_CONTEXT_USE;
9f6c9258
DK
1282 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1283 " ... fp[%d] %d\n",
1284 bp->msix_table[0].vector,
1285 0, bp->msix_table[offset].vector,
1286 i - 1, bp->msix_table[offset + i - 1].vector);
1287
1288 return 0;
1289}
1290
d6214d7a 1291int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1292{
1293 int rc;
1294
1295 rc = pci_enable_msi(bp->pdev);
1296 if (rc) {
1297 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1298 return -1;
1299 }
1300 bp->flags |= USING_MSI_FLAG;
1301
1302 return 0;
1303}
1304
1305static int bnx2x_req_irq(struct bnx2x *bp)
1306{
1307 unsigned long flags;
1308 int rc;
1309
1310 if (bp->flags & USING_MSI_FLAG)
1311 flags = 0;
1312 else
1313 flags = IRQF_SHARED;
1314
1315 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1316 bp->dev->name, bp->dev);
9f6c9258
DK
1317 return rc;
1318}
1319
619c5cb6
VZ
1320static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1321{
1322 int rc = 0;
1323 if (bp->flags & USING_MSIX_FLAG) {
1324 rc = bnx2x_req_msix_irqs(bp);
1325 if (rc)
1326 return rc;
1327 } else {
1328 bnx2x_ack_int(bp);
1329 rc = bnx2x_req_irq(bp);
1330 if (rc) {
1331 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1332 return rc;
1333 }
1334 if (bp->flags & USING_MSI_FLAG) {
1335 bp->dev->irq = bp->pdev->irq;
1336 netdev_info(bp->dev, "using MSI IRQ %d\n",
1337 bp->pdev->irq);
1338 }
1339 }
1340
1341 return 0;
1342}
1343
1344static inline void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1345{
1346 int i;
1347
619c5cb6 1348 for_each_rx_queue(bp, i)
9f6c9258
DK
1349 napi_enable(&bnx2x_fp(bp, i, napi));
1350}
1351
619c5cb6 1352static inline void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1353{
1354 int i;
1355
619c5cb6 1356 for_each_rx_queue(bp, i)
9f6c9258
DK
1357 napi_disable(&bnx2x_fp(bp, i, napi));
1358}
1359
1360void bnx2x_netif_start(struct bnx2x *bp)
1361{
4b7ed897
DK
1362 if (netif_running(bp->dev)) {
1363 bnx2x_napi_enable(bp);
1364 bnx2x_int_enable(bp);
1365 if (bp->state == BNX2X_STATE_OPEN)
1366 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1367 }
1368}
1369
1370void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1371{
1372 bnx2x_int_disable_sync(bp, disable_hw);
1373 bnx2x_napi_disable(bp);
9f6c9258 1374}
9f6c9258 1375
1376u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1377{
1378#ifdef BCM_CNIC
1379 struct bnx2x *bp = netdev_priv(dev);
1380 if (NO_FCOE(bp))
1381 return skb_tx_hash(dev, skb);
1382 else {
1383 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1384 u16 ether_type = ntohs(hdr->h_proto);
1385
1386 /* Skip VLAN tag if present */
1387 if (ether_type == ETH_P_8021Q) {
1388 struct vlan_ethhdr *vhdr =
1389 (struct vlan_ethhdr *)skb->data;
1390
1391 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1392 }
1393
1394 /* If ethertype is FCoE or FIP - use FCoE ring */
1395 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1396 return bnx2x_fcoe(bp, index);
1397 }
1398#endif
1399 /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring
1400 */
1401 return __skb_tx_hash(dev, skb,
1402 dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1403}
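
/* Example (summary of the logic above, not additional behaviour): with
 * FCoE enabled, an skb carrying an ETH_P_FCOE or ETH_P_FIP frame - possibly
 * behind a VLAN tag - is steered to the dedicated FCoE L2 ring returned by
 * bnx2x_fcoe(bp, index); all other traffic is hashed over the first
 * real_num_tx_queues - FCOE_CONTEXT_USE queues.
 */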
1404
d6214d7a
DK
1405void bnx2x_set_num_queues(struct bnx2x *bp)
1406{
1407 switch (bp->multi_mode) {
1408 case ETH_RSS_MODE_DISABLED:
9f6c9258 1409 bp->num_queues = 1;
d6214d7a
DK
1410 break;
1411 case ETH_RSS_MODE_REGULAR:
1412 bp->num_queues = bnx2x_calc_num_queues(bp);
9f6c9258 1413 break;
f85582f8 1414
9f6c9258 1415 default:
d6214d7a 1416 bp->num_queues = 1;
9f6c9258
DK
1417 break;
1418 }
ec6ba945
VZ
1419
1420 /* Add special queues */
1421 bp->num_queues += NONE_ETH_CONTEXT_USE;
1422}
1423
ec6ba945
VZ
1424static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1425{
1426 int rc, num = bp->num_queues;
1427
1428#ifdef BCM_CNIC
1429 if (NO_FCOE(bp))
1430 num -= FCOE_CONTEXT_USE;
1431
1432#endif
1433 netif_set_real_num_tx_queues(bp->dev, num);
1434 rc = netif_set_real_num_rx_queues(bp->dev, num);
1435 return rc;
1436}
1437
a8c94b91
VZ
1438static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1439{
1440 int i;
1441
1442 for_each_queue(bp, i) {
1443 struct bnx2x_fastpath *fp = &bp->fp[i];
1444
1445 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1446 if (IS_FCOE_IDX(i))
1447 /*
1448 * Although there are no IP frames expected to arrive to
1449 * this ring we still want to add an
1450 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1451 * overrun attack.
1452 */
1453 fp->rx_buf_size =
1454 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
619c5cb6 1455 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
a8c94b91
VZ
1456 else
1457 fp->rx_buf_size =
619c5cb6
VZ
1458 bp->dev->mtu + ETH_OVREHEAD +
1459 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
a8c94b91
VZ
1460 }
1461}
1462
1463static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1464{
1465 int i;
1466 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1467 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1468
1469 /*
 1470 * Prepare the initial contents of the indirection table if RSS is
1471 * enabled
1472 */
1473 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1474 for (i = 0; i < sizeof(ind_table); i++)
1475 ind_table[i] =
1476 bp->fp->cl_id + (i % num_eth_queues);
1477 }
1478
1479 /*
1480 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1481 * per-port, so if explicit configuration is needed, do it only
1482 * for a PMF.
1483 *
1484 * For 57712 and newer on the other hand it's a per-function
1485 * configuration.
1486 */
1487 return bnx2x_config_rss_pf(bp, ind_table,
1488 bp->port.pmf || !CHIP_IS_E1x(bp));
1489}
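
/* Example of the indirection table prepared above, with hypothetical
 * values: for 4 ETH queues and bp->fp->cl_id == 16, ind_table[] becomes
 *
 *   16, 17, 18, 19, 16, 17, 18, 19, ...
 *
 * repeated across all T_ETH_INDIRECTION_TABLE_SIZE entries, spreading RSS
 * buckets evenly over the ETH queues' client IDs.
 */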
1490
1491int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1492{
1493 struct bnx2x_config_rss_params params = {0};
1494 int i;
1495
1496 /* Although RSS is meaningless when there is a single HW queue we
1497 * still need it enabled in order to have HW Rx hash generated.
1498 *
1499 * if (!is_eth_multi(bp))
1500 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1501 */
1502
1503 params.rss_obj = &bp->rss_conf_obj;
1504
1505 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1506
1507 /* RSS mode */
1508 switch (bp->multi_mode) {
1509 case ETH_RSS_MODE_DISABLED:
1510 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1511 break;
1512 case ETH_RSS_MODE_REGULAR:
1513 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1514 break;
1515 case ETH_RSS_MODE_VLAN_PRI:
1516 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1517 break;
1518 case ETH_RSS_MODE_E1HOV_PRI:
1519 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1520 break;
1521 case ETH_RSS_MODE_IP_DSCP:
1522 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1523 break;
1524 default:
1525 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1526 return -EINVAL;
1527 }
1528
1529 /* If RSS is enabled */
1530 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1531 /* RSS configuration */
1532 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1533 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1534 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1535 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1536
1537 /* Hash bits */
1538 params.rss_result_mask = MULTI_MASK;
1539
1540 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1541
1542 if (config_hash) {
1543 /* RSS keys */
1544 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1545 params.rss_key[i] = random32();
1546
1547 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1548 }
1549 }
1550
1551 return bnx2x_config_rss(bp, &params);
1552}
1553
1554static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1555{
1556 struct bnx2x_func_state_params func_params = {0};
1557
1558 /* Prepare parameters for function state transitions */
1559 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1560
1561 func_params.f_obj = &bp->func_obj;
1562 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1563
1564 func_params.params.hw_init.load_phase = load_code;
1565
1566 return bnx2x_func_state_change(bp, &func_params);
1567}
1568
1569/*
 1570 * Cleans the objects that have internal lists without sending
 1571 * ramrods. Should be run when interrupts are disabled.
1572 */
1573static void bnx2x_squeeze_objects(struct bnx2x *bp)
1574{
1575 int rc;
1576 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1577 struct bnx2x_mcast_ramrod_params rparam = {0};
1578 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1579
1580 /***************** Cleanup MACs' object first *************************/
1581
1582 /* Wait for completion of requested */
1583 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1584 /* Perform a dry cleanup */
1585 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1586
1587 /* Clean ETH primary MAC */
1588 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1589 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1590 &ramrod_flags);
1591 if (rc != 0)
1592 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1593
1594 /* Cleanup UC list */
1595 vlan_mac_flags = 0;
1596 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1597 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1598 &ramrod_flags);
1599 if (rc != 0)
1600 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1601
1602 /***************** Now clean mcast object *****************************/
1603 rparam.mcast_obj = &bp->mcast_obj;
1604 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1605
1606 /* Add a DEL command... */
1607 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1608 if (rc < 0)
1609 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1610 "object: %d\n", rc);
1611
1612 /* ...and wait until all pending commands are cleared */
1613 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1614 while (rc != 0) {
1615 if (rc < 0) {
1616 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1617 rc);
1618 return;
1619 }
1620
1621 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1622 }
1623}
1624
1625#ifndef BNX2X_STOP_ON_ERROR
1626#define LOAD_ERROR_EXIT(bp, label) \
1627 do { \
1628 (bp)->state = BNX2X_STATE_ERROR; \
1629 goto label; \
1630 } while (0)
1631#else
1632#define LOAD_ERROR_EXIT(bp, label) \
1633 do { \
1634 (bp)->state = BNX2X_STATE_ERROR; \
1635 (bp)->panic = 1; \
1636 return -EBUSY; \
1637 } while (0)
1638#endif
1639
9f6c9258
DK
1640/* must be called with rtnl_lock */
1641int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1642{
619c5cb6 1643 int port = BP_PORT(bp);
9f6c9258
DK
1644 u32 load_code;
1645 int i, rc;
1646
1647#ifdef BNX2X_STOP_ON_ERROR
1648 if (unlikely(bp->panic))
1649 return -EPERM;
1650#endif
1651
1652 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1653
2ae17f66
VZ
1654 /* Set the initial link reported state to link down */
1655 bnx2x_acquire_phy_lock(bp);
1656 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1657 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1658 &bp->last_reported_link.link_report_flags);
1659 bnx2x_release_phy_lock(bp);
1660
523224a3
DK
1661 /* must be called before memory allocation and HW init */
1662 bnx2x_ilt_set_info(bp);
1663
b3b83c3f
DK
1664 /* zero fastpath structures preserving invariants like napi which are
1665 * allocated only once
1666 */
1667 for_each_queue(bp, i)
1668 bnx2x_bz_fp(bp, i);
1669
a8c94b91
VZ
1670 /* Set the receive queues buffer size */
1671 bnx2x_set_rx_buf_size(bp);
1672
619c5cb6
VZ
1673 /*
1674 * set the tpa flag for each queue. The tpa flag determines the queue
1675 * minimal size so it must be set prior to queue memory allocation
1676 */
b3b83c3f
DK
1677 for_each_queue(bp, i)
1678 bnx2x_fp(bp, i, disable_tpa) =
1679 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1680
1681#ifdef BCM_CNIC
1682 /* We don't want TPA on FCoE L2 ring */
1683 bnx2x_fcoe(bp, disable_tpa) = 1;
1684#endif
1685
d6214d7a 1686 if (bnx2x_alloc_mem(bp))
9f6c9258 1687 return -ENOMEM;
d6214d7a 1688
b3b83c3f
DK
 1689 /* Since bnx2x_alloc_mem() may update
1690 * bp->num_queues, bnx2x_set_real_num_queues() should always
1691 * come after it.
1692 */
ec6ba945 1693 rc = bnx2x_set_real_num_queues(bp);
d6214d7a 1694 if (rc) {
ec6ba945 1695 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 1696 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
1697 }
1698
9f6c9258
DK
1699 bnx2x_napi_enable(bp);
1700
 1701 /* Send LOAD_REQUEST command to MCP.
 1702 * Returns the type of LOAD command:
 1703 * if this is the first port to be initialized,
 1704 * common blocks should be initialized, otherwise - not.
1705 */
9f6c9258 1706 if (!BP_NOMCP(bp)) {
a22f0788 1707 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
9f6c9258
DK
1708 if (!load_code) {
1709 BNX2X_ERR("MCP response failure, aborting\n");
1710 rc = -EBUSY;
619c5cb6 1711 LOAD_ERROR_EXIT(bp, load_error1);
9f6c9258
DK
1712 }
1713 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1714 rc = -EBUSY; /* other port in diagnostic mode */
619c5cb6 1715 LOAD_ERROR_EXIT(bp, load_error1);
9f6c9258
DK
1716 }
1717
1718 } else {
f2e0899f 1719 int path = BP_PATH(bp);
9f6c9258 1720
f2e0899f
DK
1721 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1722 path, load_count[path][0], load_count[path][1],
1723 load_count[path][2]);
1724 load_count[path][0]++;
1725 load_count[path][1 + port]++;
1726 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1727 path, load_count[path][0], load_count[path][1],
1728 load_count[path][2]);
1729 if (load_count[path][0] == 1)
9f6c9258 1730 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
f2e0899f 1731 else if (load_count[path][1 + port] == 1)
9f6c9258
DK
1732 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1733 else
1734 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1735 }
1736
1737 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
f2e0899f 1738 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
9f6c9258
DK
1739 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1740 bp->port.pmf = 1;
1741 else
1742 bp->port.pmf = 0;
1743 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1744
619c5cb6
VZ
1745 /* Init Function state controlling object */
1746 bnx2x__init_func_obj(bp);
1747
9f6c9258
DK
1748 /* Initialize HW */
1749 rc = bnx2x_init_hw(bp, load_code);
1750 if (rc) {
1751 BNX2X_ERR("HW init failed, aborting\n");
a22f0788 1752 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 1753 LOAD_ERROR_EXIT(bp, load_error2);
9f6c9258
DK
1754 }
1755
d6214d7a
DK
1756 /* Connect to IRQs */
1757 rc = bnx2x_setup_irqs(bp);
523224a3
DK
1758 if (rc) {
1759 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 1760 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
1761 }
1762
9f6c9258
DK
1763 /* Setup NIC internals and enable interrupts */
1764 bnx2x_nic_init(bp, load_code);
1765
619c5cb6
VZ
1766 /* Init per-function objects */
1767 bnx2x_init_bp_objs(bp);
1768
f2e0899f
DK
1769 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1770 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
619c5cb6
VZ
1771 (bp->common.shmem2_base)) {
1772 if (SHMEM2_HAS(bp, dcc_support))
1773 SHMEM2_WR(bp, dcc_support,
1774 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1775 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1776 }
1777
1778 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1779 rc = bnx2x_func_start(bp);
1780 if (rc) {
1781 BNX2X_ERR("Function start failed!\n");
1782 LOAD_ERROR_EXIT(bp, load_error3);
1783 }
9f6c9258
DK
1784
1785 /* Send LOAD_DONE command to MCP */
1786 if (!BP_NOMCP(bp)) {
a22f0788 1787 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258
DK
1788 if (!load_code) {
1789 BNX2X_ERR("MCP response failure, aborting\n");
1790 rc = -EBUSY;
619c5cb6 1791 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258
DK
1792 }
1793 }
1794
619c5cb6 1795 rc = bnx2x_setup_leading(bp);
9f6c9258
DK
1796 if (rc) {
1797 BNX2X_ERR("Setup leading failed!\n");
619c5cb6 1798 LOAD_ERROR_EXIT(bp, load_error3);
f2e0899f 1799 }
9f6c9258 1800
9f6c9258 1801#ifdef BCM_CNIC
523224a3 1802 /* Enable Timer scan */
619c5cb6 1803 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
9f6c9258 1804#endif
f85582f8 1805
523224a3 1806 for_each_nondefault_queue(bp, i) {
619c5cb6 1807 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
523224a3 1808 if (rc)
619c5cb6 1809 LOAD_ERROR_EXIT(bp, load_error4);
523224a3
DK
1810 }
1811
619c5cb6
VZ
1812 rc = bnx2x_init_rss_pf(bp);
1813 if (rc)
1814 LOAD_ERROR_EXIT(bp, load_error4);
1815
523224a3
DK
1816 /* Now when Clients are configured we are ready to work */
1817 bp->state = BNX2X_STATE_OPEN;
1818
619c5cb6
VZ
1819 /* Configure a ucast MAC */
1820 rc = bnx2x_set_eth_mac(bp, true);
1821 if (rc)
1822 LOAD_ERROR_EXIT(bp, load_error4);
6e30dd4e 1823
e3835b99
DK
1824 if (bp->pending_max) {
1825 bnx2x_update_max_mf_config(bp, bp->pending_max);
1826 bp->pending_max = 0;
1827 }
1828
9f6c9258
DK
1829 if (bp->port.pmf)
1830 bnx2x_initial_phy_init(bp, load_mode);
1831
619c5cb6
VZ
1832 /* Start fast path */
1833
1834 /* Initialize Rx filter. */
1835 netif_addr_lock_bh(bp->dev);
6e30dd4e 1836 bnx2x_set_rx_mode(bp->dev);
619c5cb6 1837 netif_addr_unlock_bh(bp->dev);
6e30dd4e 1838
619c5cb6 1839 /* Start the Tx */
9f6c9258
DK
1840 switch (load_mode) {
1841 case LOAD_NORMAL:
523224a3
DK
1842 /* Tx queue should be only reenabled */
1843 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1844 break;
1845
1846 case LOAD_OPEN:
1847 netif_tx_start_all_queues(bp->dev);
523224a3 1848 smp_mb__after_clear_bit();
9f6c9258
DK
1849 break;
1850
1851 case LOAD_DIAG:
9f6c9258
DK
1852 bp->state = BNX2X_STATE_DIAG;
1853 break;
1854
1855 default:
1856 break;
1857 }
1858
1859 if (!bp->port.pmf)
1860 bnx2x__link_status_update(bp);
1861
1862 /* start the timer */
1863 mod_timer(&bp->timer, jiffies + bp->current_interval);
1864
1865#ifdef BCM_CNIC
1866 bnx2x_setup_cnic_irq_info(bp);
1867 if (bp->state == BNX2X_STATE_OPEN)
1868 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1869#endif
1870 bnx2x_inc_load_cnt(bp);
1871
619c5cb6
VZ
1872 /* Wait for all pending SP commands to complete */
1873 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1874 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1875 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1876 return -EBUSY;
1877 }
6891dd25 1878
619c5cb6 1879 bnx2x_dcbx_init(bp);
9f6c9258
DK
1880 return 0;
1881
619c5cb6 1882#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 1883load_error4:
619c5cb6 1884#ifdef BCM_CNIC
9f6c9258 1885 /* Disable Timer scan */
619c5cb6 1886 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
9f6c9258
DK
1887#endif
1888load_error3:
1889 bnx2x_int_disable_sync(bp, 1);
d6214d7a 1890
619c5cb6
VZ
1891 /* Clean queueable objects */
1892 bnx2x_squeeze_objects(bp);
1893
9f6c9258
DK
1894 /* Free SKBs, SGEs, TPA pool and driver internals */
1895 bnx2x_free_skbs(bp);
ec6ba945 1896 for_each_rx_queue(bp, i)
9f6c9258 1897 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 1898
9f6c9258 1899 /* Release IRQs */
d6214d7a
DK
1900 bnx2x_free_irq(bp);
1901load_error2:
1902 if (!BP_NOMCP(bp)) {
1903 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1904 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1905 }
1906
1907 bp->port.pmf = 0;
9f6c9258
DK
1908load_error1:
1909 bnx2x_napi_disable(bp);
d6214d7a 1910load_error0:
9f6c9258
DK
1911 bnx2x_free_mem(bp);
1912
1913 return rc;
619c5cb6 1914#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
1915}
1916
1917/* must be called with rtnl_lock */
1918int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1919{
1920 int i;
c9ee9206
VZ
1921 bool global = false;
1922
1923 if ((bp->state == BNX2X_STATE_CLOSED) ||
1924 (bp->state == BNX2X_STATE_ERROR)) {
1925 /* We can get here if the driver has been unloaded
1926 * during parity error recovery and is either waiting for a
1927 * leader to complete or for other functions to unload and
1928 * then ifdown has been issued. In this case we want to
 1929 * unload and let other functions complete a recovery
1930 * process.
1931 */
9f6c9258
DK
1932 bp->recovery_state = BNX2X_RECOVERY_DONE;
1933 bp->is_leader = 0;
c9ee9206
VZ
1934 bnx2x_release_leader_lock(bp);
1935 smp_mb();
1936
1937 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
9f6c9258
DK
1938
1939 return -EINVAL;
1940 }
1941
1942#ifdef BCM_CNIC
1943 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1944#endif
1945 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
619c5cb6 1946 smp_mb();
9f6c9258 1947
9f6c9258 1948 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 1949
f2e0899f
DK
1950 /* Stop Tx */
1951 bnx2x_tx_disable(bp);
f85582f8 1952
9f6c9258 1953 del_timer_sync(&bp->timer);
f85582f8 1954
619c5cb6
VZ
1955 /* Set ALWAYS_ALIVE bit in shmem */
1956 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
1957
1958 bnx2x_drv_pulse(bp);
9f6c9258 1959
f85582f8 1960 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9f6c9258
DK
1961
1962 /* Cleanup the chip if needed */
1963 if (unload_mode != UNLOAD_RECOVERY)
1964 bnx2x_chip_cleanup(bp, unload_mode);
523224a3 1965 else {
c9ee9206
VZ
1966 /* Send the UNLOAD_REQUEST to the MCP */
1967 bnx2x_send_unload_req(bp, unload_mode);
1968
1969 /*
 1970 * Prevent transactions to the host from the functions on the
 1971 * engine that doesn't reset global blocks in case of a global
 1972 * attention, once global blocks are reset and gates are opened
 1973 * (the engine whose leader will perform the recovery
 1974 * last).
1975 */
1976 if (!CHIP_IS_E1x(bp))
1977 bnx2x_pf_disable(bp);
1978
1979 /* Disable HW interrupts, NAPI */
523224a3
DK
1980 bnx2x_netif_stop(bp, 1);
1981
1982 /* Release IRQs */
d6214d7a 1983 bnx2x_free_irq(bp);
c9ee9206
VZ
1984
1985 /* Report UNLOAD_DONE to MCP */
1986 bnx2x_send_unload_done(bp);
523224a3 1987 }
9f6c9258 1988
619c5cb6
VZ
1989 /*
 1990 * At this stage no more interrupts will arrive, so we may safely clean
1991 * the queueable objects here in case they failed to get cleaned so far.
1992 */
1993 bnx2x_squeeze_objects(bp);
1994
9f6c9258
DK
1995 bp->port.pmf = 0;
1996
1997 /* Free SKBs, SGEs, TPA pool and driver internals */
1998 bnx2x_free_skbs(bp);
ec6ba945 1999 for_each_rx_queue(bp, i)
9f6c9258 2000 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2001
9f6c9258
DK
2002 bnx2x_free_mem(bp);
2003
2004 bp->state = BNX2X_STATE_CLOSED;
2005
c9ee9206
VZ
2006 /* Check if there are pending parity attentions. If there are - set
2007 * RECOVERY_IN_PROGRESS.
2008 */
2009 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2010 bnx2x_set_reset_in_progress(bp);
2011
2012 /* Set RESET_IS_GLOBAL if needed */
2013 if (global)
2014 bnx2x_set_reset_global(bp);
2015 }
2016
2017
9f6c9258
DK
 2018 /* The last driver must disable the "close the gate" if there is no
2019 * parity attention or "process kill" pending.
2020 */
c9ee9206 2021 if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2022 bnx2x_disable_close_the_gate(bp);
2023
9f6c9258
DK
2024 return 0;
2025}
f85582f8 2026
9f6c9258
DK
2027int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2028{
2029 u16 pmcsr;
2030
adf5f6a1
DK
2031 /* If there is no power capability, silently succeed */
2032 if (!bp->pm_cap) {
2033 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2034 return 0;
2035 }
2036
9f6c9258
DK
2037 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2038
2039 switch (state) {
2040 case PCI_D0:
2041 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2042 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2043 PCI_PM_CTRL_PME_STATUS));
2044
2045 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2046 /* delay required during transition out of D3hot */
2047 msleep(20);
2048 break;
2049
2050 case PCI_D3hot:
 2051 /* If there are other clients above, don't
 2052 shut down the power */
2053 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2054 return 0;
2055 /* Don't shut down the power for emulation and FPGA */
2056 if (CHIP_REV_IS_SLOW(bp))
2057 return 0;
2058
2059 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2060 pmcsr |= 3;
2061
2062 if (bp->wol)
2063 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2064
2065 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2066 pmcsr);
2067
2068 /* No more memory access after this point until
2069 * device is brought back to D0.
2070 */
2071 break;
2072
2073 default:
2074 return -EINVAL;
2075 }
2076 return 0;
2077}
2078
9f6c9258
DK
2079/*
2080 * net_device service functions
2081 */
d6214d7a 2082int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
2083{
2084 int work_done = 0;
2085 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2086 napi);
2087 struct bnx2x *bp = fp->bp;
2088
2089 while (1) {
2090#ifdef BNX2X_STOP_ON_ERROR
2091 if (unlikely(bp->panic)) {
2092 napi_complete(napi);
2093 return 0;
2094 }
2095#endif
2096
2097 if (bnx2x_has_tx_work(fp))
2098 bnx2x_tx_int(fp);
2099
2100 if (bnx2x_has_rx_work(fp)) {
2101 work_done += bnx2x_rx_int(fp, budget - work_done);
2102
2103 /* must not complete if we consumed full budget */
2104 if (work_done >= budget)
2105 break;
2106 }
2107
2108 /* Fall out from the NAPI loop if needed */
2109 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
ec6ba945
VZ
2110#ifdef BCM_CNIC
2111 /* No need to update SB for FCoE L2 ring as long as
2112 * it's connected to the default SB and the SB
2113 * has been updated when NAPI was scheduled.
2114 */
2115 if (IS_FCOE_FP(fp)) {
2116 napi_complete(napi);
2117 break;
2118 }
2119#endif
2120
9f6c9258 2121 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
2122 /* bnx2x_has_rx_work() reads the status block,
2123 * thus we need to ensure that status block indices
2124 * have been actually read (bnx2x_update_fpsb_idx)
2125 * prior to this check (bnx2x_has_rx_work) so that
2126 * we won't write the "newer" value of the status block
2127 * to IGU (if there was a DMA right after
2128 * bnx2x_has_rx_work and if there is no rmb, the memory
2129 * reading (bnx2x_update_fpsb_idx) may be postponed
2130 * to right before bnx2x_ack_sb). In this case there
2131 * will never be another interrupt until there is
2132 * another update of the status block, while there
2133 * is still unhandled work.
2134 */
9f6c9258
DK
2135 rmb();
2136
2137 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2138 napi_complete(napi);
2139 /* Re-enable interrupts */
523224a3
DK
2140 DP(NETIF_MSG_HW,
2141 "Update index to %d\n", fp->fp_hc_idx);
2142 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2143 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
2144 IGU_INT_ENABLE, 1);
2145 break;
2146 }
2147 }
2148 }
2149
2150 return work_done;
2151}
2152
9f6c9258
DK
2153/* we split the first BD into headers and data BDs
2154 * to ease the pain of our fellow microcode engineers
2155 * we use one mapping for both BDs
2156 * So far this has only been observed to happen
2157 * in Other Operating Systems(TM)
2158 */
2159static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2160 struct bnx2x_fastpath *fp,
2161 struct sw_tx_bd *tx_buf,
2162 struct eth_tx_start_bd **tx_bd, u16 hlen,
2163 u16 bd_prod, int nbd)
2164{
2165 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2166 struct eth_tx_bd *d_tx_bd;
2167 dma_addr_t mapping;
2168 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2169
2170 /* first fix first BD */
2171 h_tx_bd->nbd = cpu_to_le16(nbd);
2172 h_tx_bd->nbytes = cpu_to_le16(hlen);
2173
2174 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2175 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2176 h_tx_bd->addr_lo, h_tx_bd->nbd);
2177
2178 /* now get a new data BD
2179 * (after the pbd) and fill it */
2180 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2181 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2182
2183 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2184 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2185
2186 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2187 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2188 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2189
2190 /* this marks the BD as one that has no individual mapping */
2191 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2192
2193 DP(NETIF_MSG_TX_QUEUED,
2194 "TSO split data size is %d (%x:%x)\n",
2195 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2196
2197 /* update tx_bd */
2198 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2199
2200 return bd_prod;
2201}
2202
2203static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2204{
2205 if (fix > 0)
2206 csum = (u16) ~csum_fold(csum_sub(csum,
2207 csum_partial(t_header - fix, fix, 0)));
2208
2209 else if (fix < 0)
2210 csum = (u16) ~csum_fold(csum_add(csum,
2211 csum_partial(t_header, -fix, 0)));
2212
2213 return swab16(csum);
2214}
2215
2216static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2217{
2218 u32 rc;
2219
2220 if (skb->ip_summed != CHECKSUM_PARTIAL)
2221 rc = XMIT_PLAIN;
2222
2223 else {
d0d9d8ef 2224 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
9f6c9258
DK
2225 rc = XMIT_CSUM_V6;
2226 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2227 rc |= XMIT_CSUM_TCP;
2228
2229 } else {
2230 rc = XMIT_CSUM_V4;
2231 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2232 rc |= XMIT_CSUM_TCP;
2233 }
2234 }
2235
5892b9e9
VZ
2236 if (skb_is_gso_v6(skb))
2237 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2238 else if (skb_is_gso(skb))
2239 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
9f6c9258
DK
2240
2241 return rc;
2242}
2243
2244#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 2245 /* check if the packet requires linearization (packet is too fragmented);
 2246 no need to check fragmentation if page size > 8K (there will be no
 2247 violation of FW restrictions) */
2248static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2249 u32 xmit_type)
2250{
2251 int to_copy = 0;
2252 int hlen = 0;
2253 int first_bd_sz = 0;
2254
2255 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2256 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2257
2258 if (xmit_type & XMIT_GSO) {
2259 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2260 /* Check if LSO packet needs to be copied:
2261 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2262 int wnd_size = MAX_FETCH_BD - 3;
2263 /* Number of windows to check */
2264 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2265 int wnd_idx = 0;
2266 int frag_idx = 0;
2267 u32 wnd_sum = 0;
2268
2269 /* Headers length */
2270 hlen = (int)(skb_transport_header(skb) - skb->data) +
2271 tcp_hdrlen(skb);
2272
 2273 /* Amount of data (w/o headers) on the linear part of the SKB */
2274 first_bd_sz = skb_headlen(skb) - hlen;
2275
2276 wnd_sum = first_bd_sz;
2277
2278 /* Calculate the first sum - it's special */
2279 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2280 wnd_sum +=
2281 skb_shinfo(skb)->frags[frag_idx].size;
2282
2283 /* If there was data on linear skb data - check it */
2284 if (first_bd_sz > 0) {
2285 if (unlikely(wnd_sum < lso_mss)) {
2286 to_copy = 1;
2287 goto exit_lbl;
2288 }
2289
2290 wnd_sum -= first_bd_sz;
2291 }
2292
2293 /* Others are easier: run through the frag list and
2294 check all windows */
2295 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2296 wnd_sum +=
2297 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2298
2299 if (unlikely(wnd_sum < lso_mss)) {
2300 to_copy = 1;
2301 break;
2302 }
2303 wnd_sum -=
2304 skb_shinfo(skb)->frags[wnd_idx].size;
2305 }
2306 } else {
 2307 /* a non-LSO packet that is too fragmented should
 2308 always be linearized */
2309 to_copy = 1;
2310 }
2311 }
2312
2313exit_lbl:
2314 if (unlikely(to_copy))
2315 DP(NETIF_MSG_TX_QUEUED,
2316 "Linearization IS REQUIRED for %s packet. "
2317 "num_frags %d hlen %d first_bd_sz %d\n",
2318 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2319 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2320
2321 return to_copy;
2322}
2323#endif
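/* The check above slides a window of wnd_size = MAX_FETCH_BD - 3 data BDs
 * across the frag list (the first window also counts the linear data that
 * follows the headers). If any window holds less than one MSS worth of
 * data, the skb is flagged for linearization so that every segment the FW
 * builds can be fetched from at most MAX_FETCH_BD BDs.
 */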
2324
2297a2da
VZ
2325static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2326 u32 xmit_type)
f2e0899f 2327{
2297a2da
VZ
2328 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2329 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2330 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f
DK
2331 if ((xmit_type & XMIT_GSO_V6) &&
2332 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2297a2da 2333 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
2334}
2335
2336/**
e8920674 2337 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 2338 *
e8920674
DK
2339 * @skb: packet skb
2340 * @pbd: parse BD
2341 * @xmit_type: xmit flags
f2e0899f
DK
2342 */
2343static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2344 struct eth_tx_parse_bd_e1x *pbd,
2345 u32 xmit_type)
2346{
2347 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2348 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2349 pbd->tcp_flags = pbd_tcp_flags(skb);
2350
2351 if (xmit_type & XMIT_GSO_V4) {
2352 pbd->ip_id = swab16(ip_hdr(skb)->id);
2353 pbd->tcp_pseudo_csum =
2354 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2355 ip_hdr(skb)->daddr,
2356 0, IPPROTO_TCP, 0));
2357
2358 } else
2359 pbd->tcp_pseudo_csum =
2360 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2361 &ipv6_hdr(skb)->daddr,
2362 0, IPPROTO_TCP, 0));
2363
2364 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2365}
f85582f8 2366
f2e0899f 2367/**
e8920674 2368 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 2369 *
e8920674
DK
2370 * @bp: driver handle
2371 * @skb: packet skb
2372 * @parsing_data: data to be updated
2373 * @xmit_type: xmit flags
f2e0899f 2374 *
e8920674 2375 * 57712 related
f2e0899f
DK
2376 */
2377static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2297a2da 2378 u32 *parsing_data, u32 xmit_type)
f2e0899f 2379{
e39aece7
VZ
2380 *parsing_data |=
2381 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2382 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2383 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
f2e0899f 2384
e39aece7
VZ
2385 if (xmit_type & XMIT_CSUM_TCP) {
2386 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2387 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2388 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 2389
e39aece7
VZ
2390 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2391 } else
2392 /* We support checksum offload for TCP and UDP only.
2393 * No need to pass the UDP header length - it's a constant.
2394 */
2395 return skb_transport_header(skb) +
2396 sizeof(struct udphdr) - skb->data;
f2e0899f
DK
2397}
2398
93ef5c02
DK
2399static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2400 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2401{
93ef5c02
DK
2402 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2403
2404 if (xmit_type & XMIT_CSUM_V4)
2405 tx_start_bd->bd_flags.as_bitfield |=
2406 ETH_TX_BD_FLAGS_IP_CSUM;
2407 else
2408 tx_start_bd->bd_flags.as_bitfield |=
2409 ETH_TX_BD_FLAGS_IPV6;
2410
2411 if (!(xmit_type & XMIT_CSUM_TCP))
2412 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
2413}
2414
f2e0899f 2415/**
e8920674 2416 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 2417 *
e8920674
DK
2418 * @bp: driver handle
2419 * @skb: packet skb
2420 * @pbd: parse BD to be updated
2421 * @xmit_type: xmit flags
f2e0899f
DK
2422 */
2423static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2424 struct eth_tx_parse_bd_e1x *pbd,
2425 u32 xmit_type)
2426{
e39aece7 2427 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
2428
2429 /* for now NS flag is not used in Linux */
2430 pbd->global_data =
2431 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2432 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2433
2434 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 2435 skb_network_header(skb)) >> 1;
f2e0899f 2436
e39aece7
VZ
2437 hlen += pbd->ip_hlen_w;
2438
2439 /* We support checksum offload for TCP and UDP only */
2440 if (xmit_type & XMIT_CSUM_TCP)
2441 hlen += tcp_hdrlen(skb) / 2;
2442 else
2443 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
2444
2445 pbd->total_hlen_w = cpu_to_le16(hlen);
2446 hlen = hlen*2;
2447
2448 if (xmit_type & XMIT_CSUM_TCP) {
2449 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2450
2451 } else {
2452 s8 fix = SKB_CS_OFF(skb); /* signed! */
2453
2454 DP(NETIF_MSG_TX_QUEUED,
2455 "hlen %d fix %d csum before fix %x\n",
2456 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2457
2458 /* HW bug: fixup the CSUM */
2459 pbd->tcp_pseudo_csum =
2460 bnx2x_csum_fix(skb_transport_header(skb),
2461 SKB_CS(skb), fix);
2462
2463 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2464 pbd->tcp_pseudo_csum);
2465 }
2466
2467 return hlen;
2468}
f85582f8 2469
9f6c9258
DK
2470/* called with netif_tx_lock
2471 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2472 * netif_wake_queue()
2473 */
2474netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2475{
2476 struct bnx2x *bp = netdev_priv(dev);
2477 struct bnx2x_fastpath *fp;
2478 struct netdev_queue *txq;
2479 struct sw_tx_bd *tx_buf;
619c5cb6 2480 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 2481 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 2482 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 2483 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2297a2da 2484 u32 pbd_e2_parsing_data = 0;
9f6c9258
DK
2485 u16 pkt_prod, bd_prod;
2486 int nbd, fp_index;
2487 dma_addr_t mapping;
2488 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2489 int i;
2490 u8 hlen = 0;
2491 __le16 pkt_size = 0;
2492 struct ethhdr *eth;
2493 u8 mac_type = UNICAST_ADDRESS;
2494
2495#ifdef BNX2X_STOP_ON_ERROR
2496 if (unlikely(bp->panic))
2497 return NETDEV_TX_BUSY;
2498#endif
2499
2500 fp_index = skb_get_queue_mapping(skb);
2501 txq = netdev_get_tx_queue(dev, fp_index);
2502
2503 fp = &bp->fp[fp_index];
2504
2505 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2506 fp->eth_q_stats.driver_xoff++;
2507 netif_tx_stop_queue(txq);
2508 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2509 return NETDEV_TX_BUSY;
2510 }
2511
f2e0899f
DK
2512 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2513 "protocol(%x,%x) gso type %x xmit_type %x\n",
2514 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9f6c9258
DK
2515 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2516
2517 eth = (struct ethhdr *)skb->data;
2518
2519 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2520 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2521 if (is_broadcast_ether_addr(eth->h_dest))
2522 mac_type = BROADCAST_ADDRESS;
2523 else
2524 mac_type = MULTICAST_ADDRESS;
2525 }
2526
2527#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2528 /* First, check if we need to linearize the skb (due to FW
2529 restrictions). No need to check fragmentation if page size > 8K
2530 (there will be no violation to FW restrictions) */
2531 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2532 /* Statistics of linearization */
2533 bp->lin_cnt++;
2534 if (skb_linearize(skb) != 0) {
2535 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2536 "silently dropping this SKB\n");
2537 dev_kfree_skb_any(skb);
2538 return NETDEV_TX_OK;
2539 }
2540 }
2541#endif
619c5cb6
VZ
2542 /* Map skb linear data for DMA */
2543 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2544 skb_headlen(skb), DMA_TO_DEVICE);
2545 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2546 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2547 "silently dropping this SKB\n");
2548 dev_kfree_skb_any(skb);
2549 return NETDEV_TX_OK;
2550 }
9f6c9258
DK
2551 /*
2552 Please read carefully. First we use one BD which we mark as start,
2553 then we have a parsing info BD (used for TSO or xsum),
2554 and only then we have the rest of the TSO BDs.
2555 (don't forget to mark the last one as last,
2556 and to unmap only AFTER you write to the BD ...)
 2557 And above all, all PBD sizes are in words - NOT DWORDS!
2558 */
2559
619c5cb6
VZ
2560 /* get current pkt produced now - advance it just before sending packet
2561 * since mapping of pages may fail and cause packet to be dropped
2562 */
2563 pkt_prod = fp->tx_pkt_prod;
9f6c9258
DK
2564 bd_prod = TX_BD(fp->tx_bd_prod);
2565
619c5cb6
VZ
2566 /* get a tx_buf and first BD
2567 * tx_start_bd may be changed during SPLIT,
2568 * but first_bd will always stay first
2569 */
9f6c9258
DK
2570 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2571 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
619c5cb6 2572 first_bd = tx_start_bd;
9f6c9258
DK
2573
2574 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8
DK
2575 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2576 mac_type);
2577
9f6c9258 2578 /* header nbd */
f85582f8 2579 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
9f6c9258
DK
2580
2581 /* remember the first BD of the packet */
2582 tx_buf->first_bd = fp->tx_bd_prod;
2583 tx_buf->skb = skb;
2584 tx_buf->flags = 0;
2585
2586 DP(NETIF_MSG_TX_QUEUED,
2587 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2588 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2589
eab6d18d 2590 if (vlan_tx_tag_present(skb)) {
523224a3
DK
2591 tx_start_bd->vlan_or_ethertype =
2592 cpu_to_le16(vlan_tx_tag_get(skb));
2593 tx_start_bd->bd_flags.as_bitfield |=
2594 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
9f6c9258 2595 } else
523224a3 2596 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
9f6c9258
DK
2597
2598 /* turn on parsing and get a BD */
2599 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 2600
93ef5c02
DK
2601 if (xmit_type & XMIT_CSUM)
2602 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 2603
619c5cb6 2604 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
2605 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2606 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2607 /* Set PBD in checksum offload case */
2608 if (xmit_type & XMIT_CSUM)
2297a2da
VZ
2609 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2610 &pbd_e2_parsing_data,
2611 xmit_type);
619c5cb6
VZ
2612 if (IS_MF_SI(bp)) {
2613 /*
2614 * fill in the MAC addresses in the PBD - for local
2615 * switching
2616 */
2617 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2618 &pbd_e2->src_mac_addr_mid,
2619 &pbd_e2->src_mac_addr_lo,
2620 eth->h_source);
2621 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2622 &pbd_e2->dst_mac_addr_mid,
2623 &pbd_e2->dst_mac_addr_lo,
2624 eth->h_dest);
2625 }
f2e0899f
DK
2626 } else {
2627 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2628 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2629 /* Set PBD in checksum offload case */
2630 if (xmit_type & XMIT_CSUM)
2631 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 2632
9f6c9258
DK
2633 }
2634
f85582f8 2635 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
2636 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2637 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
619c5cb6 2638 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
9f6c9258
DK
2639 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2640 pkt_size = tx_start_bd->nbytes;
2641
2642 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2643 " nbytes %d flags %x vlan %x\n",
2644 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2645 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
2646 tx_start_bd->bd_flags.as_bitfield,
2647 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
2648
2649 if (xmit_type & XMIT_GSO) {
2650
2651 DP(NETIF_MSG_TX_QUEUED,
2652 "TSO packet len %d hlen %d total len %d tso size %d\n",
2653 skb->len, hlen, skb_headlen(skb),
2654 skb_shinfo(skb)->gso_size);
2655
2656 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2657
2658 if (unlikely(skb_headlen(skb) > hlen))
2659 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2660 hlen, bd_prod, ++nbd);
619c5cb6 2661 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
2662 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2663 xmit_type);
f2e0899f
DK
2664 else
2665 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 2666 }
2297a2da
VZ
2667
2668 /* Set the PBD's parsing_data field if not zero
2669 * (for the chips newer than 57711).
2670 */
2671 if (pbd_e2_parsing_data)
2672 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2673
9f6c9258
DK
2674 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2675
f85582f8 2676 /* Handle fragmented skb */
9f6c9258
DK
2677 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2678 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2679
619c5cb6
VZ
2680 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2681 frag->page_offset, frag->size,
2682 DMA_TO_DEVICE);
2683 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2684
2685 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2686 "dropping packet...\n");
2687
 2688 /* we need to unmap all buffers already mapped
 2689 * for this SKB;
 2690 * first_bd->nbd needs to be properly updated
 2691 * before the call to bnx2x_free_tx_pkt
2692 */
2693 first_bd->nbd = cpu_to_le16(nbd);
2694 bnx2x_free_tx_pkt(bp, fp, TX_BD(fp->tx_pkt_prod));
2695 return NETDEV_TX_OK;
2696 }
2697
9f6c9258
DK
2698 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2699 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2700 if (total_pkt_bd == NULL)
2701 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2702
9f6c9258
DK
2703 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2704 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2705 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2706 le16_add_cpu(&pkt_size, frag->size);
619c5cb6 2707 nbd++;
9f6c9258
DK
2708
2709 DP(NETIF_MSG_TX_QUEUED,
2710 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2711 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2712 le16_to_cpu(tx_data_bd->nbytes));
2713 }
2714
2715 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2716
619c5cb6
VZ
2717 /* update with actual num BDs */
2718 first_bd->nbd = cpu_to_le16(nbd);
2719
9f6c9258
DK
2720 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2721
2722 /* now send a tx doorbell, counting the next BD
2723 * if the packet contains or ends with it
2724 */
2725 if (TX_BD_POFF(bd_prod) < nbd)
2726 nbd++;
2727
619c5cb6
VZ
2728 /* total_pkt_bytes should be set on the first data BD if
2729 * it's not an LSO packet and there is more than one
2730 * data BD. In this case pkt_size is limited by an MTU value.
2731 * However we prefer to set it for an LSO packet (while we don't
 2732 * have to) in order to save some CPU cycles in a non-LSO
 2733 * case, where we care much more about them.
2734 */
9f6c9258
DK
2735 if (total_pkt_bd != NULL)
2736 total_pkt_bd->total_pkt_bytes = pkt_size;
2737
523224a3 2738 if (pbd_e1x)
9f6c9258 2739 DP(NETIF_MSG_TX_QUEUED,
523224a3 2740 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9f6c9258 2741 " tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
2742 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2743 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2744 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2745 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
2746 if (pbd_e2)
2747 DP(NETIF_MSG_TX_QUEUED,
2748 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2749 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2750 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2751 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2752 pbd_e2->parsing_data);
9f6c9258
DK
2753 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2754
619c5cb6 2755 fp->tx_pkt_prod++;
9f6c9258
DK
2756 /*
2757 * Make sure that the BD data is updated before updating the producer
2758 * since FW might read the BD right after the producer is updated.
2759 * This is only applicable for weak-ordered memory model archs such
 2760 * as IA-64. The following barrier is also mandatory since the FW
 2761 * assumes packets must have BDs.
2762 */
2763 wmb();
2764
2765 fp->tx_db.data.prod += nbd;
2766 barrier();
f85582f8 2767
523224a3 2768 DOORBELL(bp, fp->cid, fp->tx_db.raw);
9f6c9258
DK
2769
2770 mmiowb();
2771
2772 fp->tx_bd_prod += nbd;
2773
2774 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2775 netif_tx_stop_queue(txq);
2776
2777 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2778 * ordering of set_bit() in netif_tx_stop_queue() and read of
2779 * fp->bd_tx_cons */
2780 smp_mb();
2781
2782 fp->eth_q_stats.driver_xoff++;
2783 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2784 netif_tx_wake_queue(txq);
2785 }
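/* The second bnx2x_tx_avail() check above closes the race with
 * bnx2x_tx_int(): if the completion path freed enough BDs between the
 * first check and netif_tx_stop_queue(), nobody else would wake the
 * queue, so the transmit path wakes it itself.
 */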
2786 fp->tx_pkt++;
2787
2788 return NETDEV_TX_OK;
2789}
f85582f8 2790
9f6c9258
DK
2791/* called with rtnl_lock */
2792int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2793{
2794 struct sockaddr *addr = p;
2795 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 2796 int rc = 0;
9f6c9258
DK
2797
2798 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2799 return -EINVAL;
2800
619c5cb6
VZ
2801 if (netif_running(dev)) {
2802 rc = bnx2x_set_eth_mac(bp, false);
2803 if (rc)
2804 return rc;
2805 }
2806
9f6c9258 2807 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 2808
523224a3 2809 if (netif_running(dev))
619c5cb6 2810 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 2811
619c5cb6 2812 return rc;
9f6c9258
DK
2813}
2814
b3b83c3f
DK
2815static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2816{
2817 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2818 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
2819
2820 /* Common */
2821#ifdef BCM_CNIC
2822 if (IS_FCOE_IDX(fp_index)) {
2823 memset(sb, 0, sizeof(union host_hc_status_block));
2824 fp->status_blk_mapping = 0;
2825
2826 } else {
2827#endif
2828 /* status blocks */
619c5cb6 2829 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
2830 BNX2X_PCI_FREE(sb->e2_sb,
2831 bnx2x_fp(bp, fp_index,
2832 status_blk_mapping),
2833 sizeof(struct host_hc_status_block_e2));
2834 else
2835 BNX2X_PCI_FREE(sb->e1x_sb,
2836 bnx2x_fp(bp, fp_index,
2837 status_blk_mapping),
2838 sizeof(struct host_hc_status_block_e1x));
2839#ifdef BCM_CNIC
2840 }
2841#endif
2842 /* Rx */
2843 if (!skip_rx_queue(bp, fp_index)) {
2844 bnx2x_free_rx_bds(fp);
2845
2846 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2847 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
2848 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
2849 bnx2x_fp(bp, fp_index, rx_desc_mapping),
2850 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2851
2852 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
2853 bnx2x_fp(bp, fp_index, rx_comp_mapping),
2854 sizeof(struct eth_fast_path_rx_cqe) *
2855 NUM_RCQ_BD);
2856
2857 /* SGE ring */
2858 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
2859 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
2860 bnx2x_fp(bp, fp_index, rx_sge_mapping),
2861 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2862 }
2863
2864 /* Tx */
2865 if (!skip_tx_queue(bp, fp_index)) {
2866 /* fastpath tx rings: tx_buf tx_desc */
2867 BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
2868 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
2869 bnx2x_fp(bp, fp_index, tx_desc_mapping),
2870 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2871 }
2872 /* end of fastpath */
2873}
2874
2875void bnx2x_free_fp_mem(struct bnx2x *bp)
2876{
2877 int i;
2878 for_each_queue(bp, i)
2879 bnx2x_free_fp_mem_at(bp, i);
2880}
2881
2882static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
2883{
2884 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 2885 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
2886 bnx2x_fp(bp, index, sb_index_values) =
2887 (__le16 *)status_blk.e2_sb->sb.index_values;
2888 bnx2x_fp(bp, index, sb_running_index) =
2889 (__le16 *)status_blk.e2_sb->sb.running_index;
2890 } else {
2891 bnx2x_fp(bp, index, sb_index_values) =
2892 (__le16 *)status_blk.e1x_sb->sb.index_values;
2893 bnx2x_fp(bp, index, sb_running_index) =
2894 (__le16 *)status_blk.e1x_sb->sb.running_index;
2895 }
2896}
2897
2898static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
2899{
2900 union host_hc_status_block *sb;
2901 struct bnx2x_fastpath *fp = &bp->fp[index];
2902 int ring_size = 0;
2903
2904 /* if rx_ring_size specified - use it */
2905 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
2906 MAX_RX_AVAIL/bp->num_queues;
2907
2908 /* allocate at least number of buffers required by FW */
2909 rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
2910 MIN_RX_SIZE_TPA,
2911 rx_ring_size);
2912
2913 bnx2x_fp(bp, index, bp) = bp;
2914 bnx2x_fp(bp, index, index) = index;
2915
2916 /* Common */
2917 sb = &bnx2x_fp(bp, index, status_blk);
2918#ifdef BCM_CNIC
2919 if (!IS_FCOE_IDX(index)) {
2920#endif
2921 /* status blocks */
619c5cb6 2922 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
2923 BNX2X_PCI_ALLOC(sb->e2_sb,
2924 &bnx2x_fp(bp, index, status_blk_mapping),
2925 sizeof(struct host_hc_status_block_e2));
2926 else
2927 BNX2X_PCI_ALLOC(sb->e1x_sb,
2928 &bnx2x_fp(bp, index, status_blk_mapping),
2929 sizeof(struct host_hc_status_block_e1x));
2930#ifdef BCM_CNIC
2931 }
2932#endif
8eef2af1
DK
2933
2934 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
2935 * set shortcuts for it.
2936 */
2937 if (!IS_FCOE_IDX(index))
2938 set_sb_shortcuts(bp, index);
b3b83c3f
DK
2939
2940 /* Tx */
2941 if (!skip_tx_queue(bp, index)) {
2942 /* fastpath tx rings: tx_buf tx_desc */
2943 BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
2944 sizeof(struct sw_tx_bd) * NUM_TX_BD);
2945 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
2946 &bnx2x_fp(bp, index, tx_desc_mapping),
2947 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2948 }
2949
2950 /* Rx */
2951 if (!skip_rx_queue(bp, index)) {
2952 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2953 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
2954 sizeof(struct sw_rx_bd) * NUM_RX_BD);
2955 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
2956 &bnx2x_fp(bp, index, rx_desc_mapping),
2957 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2958
2959 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
2960 &bnx2x_fp(bp, index, rx_comp_mapping),
2961 sizeof(struct eth_fast_path_rx_cqe) *
2962 NUM_RCQ_BD);
2963
2964 /* SGE ring */
2965 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
2966 sizeof(struct sw_rx_page) * NUM_RX_SGE);
2967 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
2968 &bnx2x_fp(bp, index, rx_sge_mapping),
2969 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2970 /* RX BD ring */
2971 bnx2x_set_next_page_rx_bd(fp);
2972
2973 /* CQ ring */
2974 bnx2x_set_next_page_rx_cq(fp);
2975
2976 /* BDs */
2977 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
2978 if (ring_size < rx_ring_size)
2979 goto alloc_mem_err;
2980 }
2981
2982 return 0;
2983
2984/* handles low memory cases */
2985alloc_mem_err:
2986 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
2987 index, ring_size);
 2988 /* FW will drop all packets if the queue is not big enough.
 2989 * In these cases we disable the queue.
 2990 * The minimum size differs for TPA and non-TPA queues.
2991 */
2992 if (ring_size < (fp->disable_tpa ?
eb722d7a 2993 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
2994 /* release memory allocated for this queue */
2995 bnx2x_free_fp_mem_at(bp, index);
2996 return -ENOMEM;
2997 }
2998 return 0;
2999}
3000
3001int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3002{
3003 int i;
3004
3005 /**
3006 * 1. Allocate FP for leading - fatal if error
3007 * 2. {CNIC} Allocate FCoE FP - fatal if error
3008 * 3. Allocate RSS - fix number of queues if error
3009 */
3010
3011 /* leading */
3012 if (bnx2x_alloc_fp_mem_at(bp, 0))
3013 return -ENOMEM;
3014#ifdef BCM_CNIC
8eef2af1
DK
3015 if (!NO_FCOE(bp))
3016 /* FCoE */
3017 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
 3018 /* we will fail the load process instead of marking
3019 * NO_FCOE_FLAG
3020 */
3021 return -ENOMEM;
b3b83c3f
DK
3022#endif
3023 /* RSS */
3024 for_each_nondefault_eth_queue(bp, i)
3025 if (bnx2x_alloc_fp_mem_at(bp, i))
3026 break;
3027
3028 /* handle memory failures */
3029 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3030 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3031
3032 WARN_ON(delta < 0);
3033#ifdef BCM_CNIC
3034 /**
 3035 * move non-eth FPs next to the last eth FP;
 3036 * this must be done in that order:
3037 * FCOE_IDX < FWD_IDX < OOO_IDX
3038 */
3039
3040 /* move FCoE fp */
3041 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3042#endif
3043 bp->num_queues -= delta;
3044 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3045 bp->num_queues + delta, bp->num_queues);
3046 }
3047
3048 return 0;
3049}
d6214d7a 3050
523224a3
DK
3051void bnx2x_free_mem_bp(struct bnx2x *bp)
3052{
3053 kfree(bp->fp);
3054 kfree(bp->msix_table);
3055 kfree(bp->ilt);
3056}
3057
3058int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3059{
3060 struct bnx2x_fastpath *fp;
3061 struct msix_entry *tbl;
3062 struct bnx2x_ilt *ilt;
3063
3064 /* fp array */
3065 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
3066 if (!fp)
3067 goto alloc_err;
3068 bp->fp = fp;
3069
3070 /* msix table */
ec6ba945 3071 tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
523224a3
DK
3072 GFP_KERNEL);
3073 if (!tbl)
3074 goto alloc_err;
3075 bp->msix_table = tbl;
3076
3077 /* ilt */
3078 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3079 if (!ilt)
3080 goto alloc_err;
3081 bp->ilt = ilt;
3082
3083 return 0;
3084alloc_err:
3085 bnx2x_free_mem_bp(bp);
3086 return -ENOMEM;
3087
3088}
3089
a9fccec7 3090int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
3091{
3092 struct bnx2x *bp = netdev_priv(dev);
3093
3094 if (unlikely(!netif_running(dev)))
3095 return 0;
3096
3097 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3098 return bnx2x_nic_load(bp, LOAD_NORMAL);
3099}
3100
1ac9e428
YR
3101int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3102{
3103 u32 sel_phy_idx = 0;
3104 if (bp->link_params.num_phys <= 1)
3105 return INT_PHY;
3106
3107 if (bp->link_vars.link_up) {
3108 sel_phy_idx = EXT_PHY1;
3109 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3110 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3111 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3112 sel_phy_idx = EXT_PHY2;
3113 } else {
3114
3115 switch (bnx2x_phy_selection(&bp->link_params)) {
3116 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3117 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3118 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3119 sel_phy_idx = EXT_PHY1;
3120 break;
3121 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3122 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3123 sel_phy_idx = EXT_PHY2;
3124 break;
3125 }
3126 }
3127
3128 return sel_phy_idx;
3129
3130}
3131int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3132{
3133 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3134 /*
 3135 * The selected active PHY index always refers to the state after swapping (in case PHY
3136 * swapping is enabled). So when swapping is enabled, we need to reverse
3137 * the configuration
3138 */
3139
3140 if (bp->link_params.multi_phy_config &
3141 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3142 if (sel_phy_idx == EXT_PHY1)
3143 sel_phy_idx = EXT_PHY2;
3144 else if (sel_phy_idx == EXT_PHY2)
3145 sel_phy_idx = EXT_PHY1;
3146 }
3147 return LINK_CONFIG_IDX(sel_phy_idx);
3148}
3149
9f6c9258
DK
3150/* called with rtnl_lock */
3151int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3152{
3153 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
3154
3155 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3156 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3157 return -EAGAIN;
3158 }
3159
3160 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3161 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3162 return -EINVAL;
3163
3164 /* This does not race with packet allocation
3165 * because the actual alloc size is
3166 * only updated as part of load
3167 */
3168 dev->mtu = new_mtu;
3169
66371c44
MM
3170 return bnx2x_reload_if_running(dev);
3171}
3172
3173u32 bnx2x_fix_features(struct net_device *dev, u32 features)
3174{
3175 struct bnx2x *bp = netdev_priv(dev);
3176
3177 /* TPA requires Rx CSUM offloading */
3178 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3179 features &= ~NETIF_F_LRO;
3180
3181 return features;
3182}
3183
3184int bnx2x_set_features(struct net_device *dev, u32 features)
3185{
3186 struct bnx2x *bp = netdev_priv(dev);
3187 u32 flags = bp->flags;
538dd2e3 3188 bool bnx2x_reload = false;
66371c44
MM
3189
3190 if (features & NETIF_F_LRO)
3191 flags |= TPA_ENABLE_FLAG;
3192 else
3193 flags &= ~TPA_ENABLE_FLAG;
3194
538dd2e3
MB
3195 if (features & NETIF_F_LOOPBACK) {
3196 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3197 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3198 bnx2x_reload = true;
3199 }
3200 } else {
3201 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3202 bp->link_params.loopback_mode = LOOPBACK_NONE;
3203 bnx2x_reload = true;
3204 }
3205 }
3206
66371c44
MM
3207 if (flags ^ bp->flags) {
3208 bp->flags = flags;
538dd2e3
MB
3209 bnx2x_reload = true;
3210 }
66371c44 3211
538dd2e3 3212 if (bnx2x_reload) {
66371c44
MM
3213 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3214 return bnx2x_reload_if_running(dev);
3215 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
3216 }
3217
66371c44 3218 return 0;
9f6c9258
DK
3219}
3220
3221void bnx2x_tx_timeout(struct net_device *dev)
3222{
3223 struct bnx2x *bp = netdev_priv(dev);
3224
3225#ifdef BNX2X_STOP_ON_ERROR
3226 if (!bp->panic)
3227 bnx2x_panic();
3228#endif
3229 /* This allows the netif to be shutdown gracefully before resetting */
3230 schedule_delayed_work(&bp->reset_task, 0);
3231}
3232
9f6c9258
DK
3233int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3234{
3235 struct net_device *dev = pci_get_drvdata(pdev);
3236 struct bnx2x *bp;
3237
3238 if (!dev) {
3239 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3240 return -ENODEV;
3241 }
3242 bp = netdev_priv(dev);
3243
3244 rtnl_lock();
3245
3246 pci_save_state(pdev);
3247
3248 if (!netif_running(dev)) {
3249 rtnl_unlock();
3250 return 0;
3251 }
3252
3253 netif_device_detach(dev);
3254
3255 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3256
3257 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3258
3259 rtnl_unlock();
3260
3261 return 0;
3262}
3263
3264int bnx2x_resume(struct pci_dev *pdev)
3265{
3266 struct net_device *dev = pci_get_drvdata(pdev);
3267 struct bnx2x *bp;
3268 int rc;
3269
3270 if (!dev) {
3271 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3272 return -ENODEV;
3273 }
3274 bp = netdev_priv(dev);
3275
3276 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3277 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3278 return -EAGAIN;
3279 }
3280
3281 rtnl_lock();
3282
3283 pci_restore_state(pdev);
3284
3285 if (!netif_running(dev)) {
3286 rtnl_unlock();
3287 return 0;
3288 }
3289
3290 bnx2x_set_power_state(bp, PCI_D0);
3291 netif_device_attach(dev);
3292
f2e0899f
DK
3293 /* Since the chip was reset, clear the FW sequence number */
3294 bp->fw_seq = 0;
9f6c9258
DK
3295 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3296
3297 rtnl_unlock();
3298
3299 return rc;
3300}
619c5cb6
VZ
3301
3302
3303void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3304 u32 cid)
3305{
3306 /* ustorm cxt validation */
3307 cxt->ustorm_ag_context.cdu_usage =
3308 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3309 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3310 /* xcontext validation */
3311 cxt->xstorm_ag_context.cdu_reserved =
3312 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3313 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3314}
3315
3316static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3317 u8 fw_sb_id, u8 sb_index,
3318 u8 ticks)
3319{
3320
3321 u32 addr = BAR_CSTRORM_INTMEM +
3322 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3323 REG_WR8(bp, addr, ticks);
3324 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3325 port, fw_sb_id, sb_index, ticks);
3326}
3327
3328static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3329 u16 fw_sb_id, u8 sb_index,
3330 u8 disable)
3331{
3332 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3333 u32 addr = BAR_CSTRORM_INTMEM +
3334 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3335 u16 flags = REG_RD16(bp, addr);
3336 /* clear and set */
3337 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3338 flags |= enable_flag;
3339 REG_WR16(bp, addr, flags);
3340 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3341 port, fw_sb_id, sb_index, disable);
3342}
3343
3344void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3345 u8 sb_index, u8 disable, u16 usec)
3346{
3347 int port = BP_PORT(bp);
3348 u8 ticks = usec / BNX2X_BTR;
3349
3350 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3351
3352 disable = disable ? 1 : (usec ? 0 : 1);
3353 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3354}