bnx2x: remove unused variable
[deliverable/linux.git] drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
9f6c9258
DK
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
5de92408 3 * Copyright (c) 2007-2011 Broadcom Corporation
9f6c9258
DK
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
f1deab50
JP
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
9f6c9258 20#include <linux/etherdevice.h>
9bcc0893 21#include <linux/if_vlan.h>
a6b7a407 22#include <linux/interrupt.h>
9f6c9258 23#include <linux/ip.h>
f2e0899f 24#include <net/ipv6.h>
7f3e01fe 25#include <net/ip6_checksum.h>
6891dd25 26#include <linux/firmware.h>
c0cba59e 27#include <linux/prefetch.h>
9f6c9258 28#include "bnx2x_cmn.h"
523224a3 29#include "bnx2x_init.h"
042181f5 30#include "bnx2x_sp.h"
523224a3 31
619c5cb6 32
9f6c9258 33
b3b83c3f
DK
34/**
35 * bnx2x_bz_fp - zero content of the fastpath structure.
36 *
37 * @bp: driver handle
38 * @index: fastpath index to be zeroed
39 *
40 * Makes sure the contents of bp->fp[index].napi are kept
41 * intact.
42 */
43static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
44{
45 struct bnx2x_fastpath *fp = &bp->fp[index];
46 struct napi_struct orig_napi = fp->napi;
47 /* bzero bnx2x_fastpath contents */
48 memset(fp, 0, sizeof(*fp));
49
50 /* Restore the NAPI object as it has been already initialized */
51 fp->napi = orig_napi;
6383c0b3
AE
52
53 fp->bp = bp;
54 fp->index = index;
55 if (IS_ETH_FP(fp))
56 fp->max_cos = bp->max_cos;
57 else
58 /* Special queues support only one CoS */
59 fp->max_cos = 1;
60
61 /*
62 * Set the TPA flag for each queue. The TPA flag determines the queue's
63 * minimal size, so it must be set prior to queue memory allocation.
64 */
65 fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
66
67#ifdef BCM_CNIC
45d3539a
VZ
68 /* We don't want TPA on an FCoE L2 ring */
69 if (IS_FCOE_FP(fp))
70 fp->disable_tpa = 1;
6383c0b3 71#endif
b3b83c3f
DK
72}
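
/*
 * Editor's illustrative sketch (not part of the driver): the generic
 * "save a field, zero the struct, restore the field" pattern that
 * bnx2x_bz_fp() above (and bnx2x_move_fp() below) rely on to keep the
 * once-initialized napi object intact.  The struct and names here are
 * hypothetical.
 */
struct example_ctx {
	struct napi_struct napi;	/* initialized once, must survive */
	u32 per_load_state[16];		/* may be zeroed on every load */
};

static inline void example_bz_ctx(struct example_ctx *ctx)
{
	struct napi_struct saved_napi = ctx->napi;	/* save */

	memset(ctx, 0, sizeof(*ctx));			/* zero */
	ctx->napi = saved_napi;				/* restore */
}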
73
74/**
75 * bnx2x_move_fp - move content of the fastpath structure.
76 *
77 * @bp: driver handle
78 * @from: source FP index
79 * @to: destination FP index
80 *
81 * Makes sure the contents of bp->fp[to].napi are kept
82 * intact.
83 */
84static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
85{
86 struct bnx2x_fastpath *from_fp = &bp->fp[from];
87 struct bnx2x_fastpath *to_fp = &bp->fp[to];
88 struct napi_struct orig_napi = to_fp->napi;
89 /* Move bnx2x_fastpath contents */
90 memcpy(to_fp, from_fp, sizeof(*to_fp));
91 to_fp->index = to;
92
93 /* Restore the NAPI object as it has been already initialized */
94 to_fp->napi = orig_napi;
95}
96
619c5cb6
VZ
97int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
98
9f6c9258
DK
99/* free skb in the packet ring at pos idx
100 * return idx of last bd freed
101 */
6383c0b3 102static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
9f6c9258
DK
103 u16 idx)
104{
6383c0b3 105 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
9f6c9258
DK
106 struct eth_tx_start_bd *tx_start_bd;
107 struct eth_tx_bd *tx_data_bd;
108 struct sk_buff *skb = tx_buf->skb;
109 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
110 int nbd;
111
112 /* prefetch skb end pointer to speedup dev_kfree_skb() */
113 prefetch(&skb->end);
114
619c5cb6 115 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
6383c0b3 116 txdata->txq_index, idx, tx_buf, skb);
9f6c9258
DK
117
118 /* unmap first bd */
119 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
6383c0b3 120 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
9f6c9258 121 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
4bca60f4 122 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
9f6c9258 123
619c5cb6 124
9f6c9258
DK
125 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
126#ifdef BNX2X_STOP_ON_ERROR
127 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
128 BNX2X_ERR("BAD nbd!\n");
129 bnx2x_panic();
130 }
131#endif
132 new_cons = nbd + tx_buf->first_bd;
133
134 /* Get the next bd */
135 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
136
137 /* Skip a parse bd... */
138 --nbd;
139 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
140
141 /* ...and the TSO split header bd since they have no mapping */
142 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
143 --nbd;
144 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
145 }
146
147 /* now free frags */
148 while (nbd > 0) {
149
150 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
6383c0b3 151 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
9f6c9258
DK
152 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
153 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
154 if (--nbd)
155 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
156 }
157
158 /* release skb */
159 WARN_ON(!skb);
40955532 160 dev_kfree_skb_any(skb);
9f6c9258
DK
161 tx_buf->first_bd = 0;
162 tx_buf->skb = NULL;
163
164 return new_cons;
165}
166
6383c0b3 167int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
9f6c9258 168{
9f6c9258 169 struct netdev_queue *txq;
6383c0b3 170 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
9f6c9258
DK
171
172#ifdef BNX2X_STOP_ON_ERROR
173 if (unlikely(bp->panic))
174 return -1;
175#endif
176
6383c0b3
AE
177 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
178 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
179 sw_cons = txdata->tx_pkt_cons;
9f6c9258
DK
180
181 while (sw_cons != hw_cons) {
182 u16 pkt_cons;
183
184 pkt_cons = TX_BD(sw_cons);
185
f2e0899f
DK
186 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
187 " pkt_cons %u\n",
6383c0b3 188 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
9f6c9258 189
6383c0b3 190 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
9f6c9258
DK
191 sw_cons++;
192 }
193
6383c0b3
AE
194 txdata->tx_pkt_cons = sw_cons;
195 txdata->tx_bd_cons = bd_cons;
9f6c9258
DK
196
197 /* Need to make the tx_bd_cons update visible to start_xmit()
198 * before checking for netif_tx_queue_stopped(). Without the
199 * memory barrier, there is a small possibility that
200 * start_xmit() will miss it and cause the queue to be stopped
201 * forever.
619c5cb6
VZ
202 * On the other hand we need an rmb() here to ensure the proper
203 * ordering of bit testing in the following
204 * netif_tx_queue_stopped(txq) call.
9f6c9258
DK
205 */
206 smp_mb();
207
9f6c9258
DK
208 if (unlikely(netif_tx_queue_stopped(txq))) {
209 /* Taking tx_lock() is needed to prevent re-enabling the queue
210 * while it's empty. This could have happened if rx_action() gets
211 * suspended in bnx2x_tx_int() after the condition before
212 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
213 *
214 * stops the queue->sees fresh tx_bd_cons->releases the queue->
215 * sends some packets consuming the whole queue again->
216 * stops the queue
217 */
218
219 __netif_tx_lock(txq, smp_processor_id());
220
221 if ((netif_tx_queue_stopped(txq)) &&
222 (bp->state == BNX2X_STATE_OPEN) &&
6383c0b3 223 (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
9f6c9258
DK
224 netif_tx_wake_queue(txq);
225
226 __netif_tx_unlock(txq);
227 }
228 return 0;
229}
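
/*
 * Editor's illustrative sketch (not part of the driver): the stop/wake
 * pairing described by the barrier comment above, with the ring details
 * and the tx_lock stripped out.  The helper name and the "ring_has_room"
 * parameter are hypothetical.
 */
static inline void example_tx_done_wake(struct netdev_queue *txq,
					bool ring_has_room)
{
	/* the new consumer index has already been published here */
	smp_mb();	/* pairs with the barrier in the xmit path */

	if (netif_tx_queue_stopped(txq) && ring_has_room)
		netif_tx_wake_queue(txq);
}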
230
231static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
232 u16 idx)
233{
234 u16 last_max = fp->last_max_sge;
235
236 if (SUB_S16(idx, last_max) > 0)
237 fp->last_max_sge = idx;
238}
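
/*
 * Editor's note (illustrative, not part of the driver): the SUB_S16()
 * test above treats SGE indices as wrapping 16-bit counters; a signed
 * 16-bit subtraction keeps the "is newer" comparison correct across the
 * 0xffff -> 0x0000 wrap.  A minimal stand-alone equivalent:
 */
static inline int example_idx_is_newer(u16 a, u16 b)
{
	return (s16)(a - b) > 0;	/* true iff "a" is ahead of "b" mod 2^16 */
}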
239
240static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
241 struct eth_fast_path_rx_cqe *fp_cqe)
242{
243 struct bnx2x *bp = fp->bp;
244 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
245 le16_to_cpu(fp_cqe->len_on_bd)) >>
246 SGE_PAGE_SHIFT;
247 u16 last_max, last_elem, first_elem;
248 u16 delta = 0;
249 u16 i;
250
251 if (!sge_len)
252 return;
253
254 /* First mark all used pages */
255 for (i = 0; i < sge_len; i++)
619c5cb6 256 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
523224a3 257 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
9f6c9258
DK
258
259 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
523224a3 260 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
261
262 /* Here we assume that the last SGE index is the biggest */
263 prefetch((void *)(fp->sge_mask));
523224a3
DK
264 bnx2x_update_last_max_sge(fp,
265 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
9f6c9258
DK
266
267 last_max = RX_SGE(fp->last_max_sge);
619c5cb6
VZ
268 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
269 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
9f6c9258
DK
270
271 /* If ring is not full */
272 if (last_elem + 1 != first_elem)
273 last_elem++;
274
275 /* Now update the prod */
276 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
277 if (likely(fp->sge_mask[i]))
278 break;
279
619c5cb6
VZ
280 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
281 delta += BIT_VEC64_ELEM_SZ;
9f6c9258
DK
282 }
283
284 if (delta > 0) {
285 fp->rx_sge_prod += delta;
286 /* clear page-end entries */
287 bnx2x_clear_sge_mask_next_elems(fp);
288 }
289
290 DP(NETIF_MSG_RX_STATUS,
291 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
292 fp->last_max_sge, fp->rx_sge_prod);
293}
294
295static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
619c5cb6
VZ
296 struct sk_buff *skb, u16 cons, u16 prod,
297 struct eth_fast_path_rx_cqe *cqe)
9f6c9258
DK
298{
299 struct bnx2x *bp = fp->bp;
300 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
301 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
302 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
303 dma_addr_t mapping;
619c5cb6
VZ
304 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
305 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
9f6c9258 306
619c5cb6
VZ
307 /* print error if current state != stop */
308 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
9f6c9258
DK
309 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
310
619c5cb6
VZ
311 /* Try to map an empty skb from the aggregation info */
312 mapping = dma_map_single(&bp->pdev->dev,
313 first_buf->skb->data,
314 fp->rx_buf_size, DMA_FROM_DEVICE);
315 /*
316 * ...if it fails - move the skb from the consumer to the producer
317 * and set the current aggregation state as ERROR to drop it
318 * when TPA_STOP arrives.
319 */
320
321 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
322 /* Move the BD from the consumer to the producer */
323 bnx2x_reuse_rx_skb(fp, cons, prod);
324 tpa_info->tpa_state = BNX2X_TPA_ERROR;
325 return;
326 }
9f6c9258 327
619c5cb6
VZ
328 /* move empty skb from pool to prod */
329 prod_rx_buf->skb = first_buf->skb;
330 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
9f6c9258
DK
331 /* point prod_bd to new skb */
332 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
333 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
334
619c5cb6
VZ
335 /* move partial skb from cons to pool (don't unmap yet) */
336 *first_buf = *cons_rx_buf;
337
338 /* mark bin state as START */
339 tpa_info->parsing_flags =
340 le16_to_cpu(cqe->pars_flags.flags);
341 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
342 tpa_info->tpa_state = BNX2X_TPA_START;
343 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
344 tpa_info->placement_offset = cqe->placement_offset;
345
9f6c9258
DK
346#ifdef BNX2X_STOP_ON_ERROR
347 fp->tpa_queue_used |= (1 << queue);
348#ifdef _ASM_GENERIC_INT_L64_H
349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
350#else
351 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
352#endif
353 fp->tpa_queue_used);
354#endif
355}
356
e4e3c02a
VZ
357/* Timestamp option length allowed for TPA aggregation:
358 *
359 * nop nop kind length echo val
360 */
361#define TPA_TSTAMP_OPT_LEN 12
362/**
e8920674 363 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
e4e3c02a 364 *
e8920674
DK
365 * @bp: driver handle
366 * @parsing_flags: parsing flags from the START CQE
367 * @len_on_bd: total length of the first packet for the
368 * aggregation.
369 *
370 * Returns the approximate value of the MSS for this aggregation,
371 * calculated from its first packet.
e4e3c02a
VZ
372 */
373static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
374 u16 len_on_bd)
375{
619c5cb6
VZ
376 /*
377 * A TPA aggregation won't have IP options, TCP options other than
378 * the timestamp, or IPv6 extension headers.
e4e3c02a 379 */
619c5cb6
VZ
380 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
381
382 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
383 PRS_FLAG_OVERETH_IPV6)
384 hdrs_len += sizeof(struct ipv6hdr);
385 else /* IPv4 */
386 hdrs_len += sizeof(struct iphdr);
e4e3c02a
VZ
387
388
389 /* Check if there was a TCP timestamp; if there is one, it will
390 * always be 12 bytes long: nop nop kind length echo val.
391 *
392 * Otherwise the FW would have closed the aggregation.
393 */
394 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
395 hdrs_len += TPA_TSTAMP_OPT_LEN;
396
397 return len_on_bd - hdrs_len;
398}
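
/*
 * Editor's worked example (illustrative): for an IPv4 aggregation that
 * carries TCP timestamps and has len_on_bd = 1514, the function above
 * returns 1514 - (ETH 14 + IP 20 + TCP 20 + timestamp 12) = 1448, which
 * matches the per-segment payload of a 1500-byte-MTU flow using timestamps.
 */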
399
9f6c9258 400static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
619c5cb6
VZ
401 u16 queue, struct sk_buff *skb,
402 struct eth_end_agg_rx_cqe *cqe,
403 u16 cqe_idx)
9f6c9258
DK
404{
405 struct sw_rx_page *rx_pg, old_rx_pg;
9f6c9258
DK
406 u32 i, frag_len, frag_size, pages;
407 int err;
408 int j;
619c5cb6
VZ
409 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
410 u16 len_on_bd = tpa_info->len_on_bd;
9f6c9258 411
619c5cb6 412 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
9f6c9258
DK
413 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
414
415 /* This is needed in order to enable forwarding support */
416 if (frag_size)
619c5cb6
VZ
417 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
418 tpa_info->parsing_flags, len_on_bd);
9f6c9258
DK
419
420#ifdef BNX2X_STOP_ON_ERROR
421 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
422 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
423 pages, cqe_idx);
619c5cb6 424 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
9f6c9258
DK
425 bnx2x_panic();
426 return -EINVAL;
427 }
428#endif
429
430 /* Run through the SGL and compose the fragmented skb */
431 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
619c5cb6 432 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
9f6c9258
DK
433
434 /* FW gives the indices of the SGE as if the ring is an array
435 (meaning that "next" element will consume 2 indices) */
436 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
437 rx_pg = &fp->rx_page_ring[sge_idx];
438 old_rx_pg = *rx_pg;
439
440 /* If we fail to allocate a substitute page, we simply stop
441 where we are and drop the whole packet */
442 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
443 if (unlikely(err)) {
444 fp->eth_q_stats.rx_skb_alloc_failed++;
445 return err;
446 }
447
448 /* Unmap the page as we are going to pass it to the stack */
449 dma_unmap_page(&bp->pdev->dev,
450 dma_unmap_addr(&old_rx_pg, mapping),
451 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
452
453 /* Add one frag and update the appropriate fields in the skb */
454 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
455
456 skb->data_len += frag_len;
e1ac50f6 457 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
9f6c9258
DK
458 skb->len += frag_len;
459
460 frag_size -= frag_len;
461 }
462
463 return 0;
464}
465
466static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
619c5cb6 467 u16 queue, struct eth_end_agg_rx_cqe *cqe,
9f6c9258
DK
468 u16 cqe_idx)
469{
619c5cb6
VZ
470 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
471 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
472 u8 pad = tpa_info->placement_offset;
473 u16 len = tpa_info->len_on_bd;
9f6c9258
DK
474 struct sk_buff *skb = rx_buf->skb;
475 /* alloc new skb */
619c5cb6
VZ
476 struct sk_buff *new_skb;
477 u8 old_tpa_state = tpa_info->tpa_state;
478
479 tpa_info->tpa_state = BNX2X_TPA_STOP;
480
481 /* If there was an error during the handling of the TPA_START -
482 * drop this aggregation.
483 */
484 if (old_tpa_state == BNX2X_TPA_ERROR)
485 goto drop;
486
487 /* Try to allocate the new skb */
488 new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
9f6c9258
DK
489
490 /* Unmap the skb in the pool anyway, as we are going to change the
491 pool entry status to BNX2X_TPA_STOP even if the new skb allocation
492 fails. */
493 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
a8c94b91 494 fp->rx_buf_size, DMA_FROM_DEVICE);
9f6c9258
DK
495
496 if (likely(new_skb)) {
9f6c9258 497 prefetch(skb);
217de5aa 498 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
9f6c9258
DK
499
500#ifdef BNX2X_STOP_ON_ERROR
a8c94b91 501 if (pad + len > fp->rx_buf_size) {
9f6c9258
DK
502 BNX2X_ERR("skb_put is about to fail... "
503 "pad %d len %d rx_buf_size %d\n",
a8c94b91 504 pad, len, fp->rx_buf_size);
9f6c9258
DK
505 bnx2x_panic();
506 return;
507 }
508#endif
509
510 skb_reserve(skb, pad);
511 skb_put(skb, len);
512
513 skb->protocol = eth_type_trans(skb, bp->dev);
514 skb->ip_summed = CHECKSUM_UNNECESSARY;
515
619c5cb6
VZ
516 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
517 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
518 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
9bcc0893 519 napi_gro_receive(&fp->napi, skb);
9f6c9258
DK
520 } else {
521 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
522 " - dropping packet!\n");
40955532 523 dev_kfree_skb_any(skb);
9f6c9258
DK
524 }
525
526
527 /* put new skb in bin */
619c5cb6 528 rx_buf->skb = new_skb;
9f6c9258 529
619c5cb6 530 return;
9f6c9258
DK
531 }
532
619c5cb6
VZ
533drop:
534 /* drop the packet and keep the buffer in the bin */
535 DP(NETIF_MSG_RX_STATUS,
536 "Failed to allocate or map a new skb - dropping packet!\n");
537 fp->eth_q_stats.rx_skb_alloc_failed++;
9f6c9258
DK
538}
539
540/* Set Toeplitz hash value in the skb using the value from the
541 * CQE (calculated by HW).
542 */
543static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
544 struct sk_buff *skb)
545{
546 /* Set Toeplitz hash from CQE */
547 if ((bp->dev->features & NETIF_F_RXHASH) &&
548 (cqe->fast_path_cqe.status_flags &
549 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
550 skb->rxhash =
551 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
552}
553
554int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
555{
556 struct bnx2x *bp = fp->bp;
557 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
558 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
559 int rx_pkt = 0;
560
561#ifdef BNX2X_STOP_ON_ERROR
562 if (unlikely(bp->panic))
563 return 0;
564#endif
565
566 /* CQ "next element" is of the size of the regular element,
567 that's why it's ok here */
568 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
569 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
570 hw_comp_cons++;
571
572 bd_cons = fp->rx_bd_cons;
573 bd_prod = fp->rx_bd_prod;
574 bd_prod_fw = bd_prod;
575 sw_comp_cons = fp->rx_comp_cons;
576 sw_comp_prod = fp->rx_comp_prod;
577
578 /* Memory barrier necessary as speculative reads of the rx
579 * buffer can be ahead of the index in the status block
580 */
581 rmb();
582
583 DP(NETIF_MSG_RX_STATUS,
584 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
585 fp->index, hw_comp_cons, sw_comp_cons);
586
587 while (sw_comp_cons != hw_comp_cons) {
588 struct sw_rx_bd *rx_buf = NULL;
589 struct sk_buff *skb;
590 union eth_rx_cqe *cqe;
619c5cb6 591 struct eth_fast_path_rx_cqe *cqe_fp;
9f6c9258 592 u8 cqe_fp_flags;
619c5cb6 593 enum eth_rx_cqe_type cqe_fp_type;
9f6c9258
DK
594 u16 len, pad;
595
619c5cb6
VZ
596#ifdef BNX2X_STOP_ON_ERROR
597 if (unlikely(bp->panic))
598 return 0;
599#endif
600
9f6c9258
DK
601 comp_ring_cons = RCQ_BD(sw_comp_cons);
602 bd_prod = RX_BD(bd_prod);
603 bd_cons = RX_BD(bd_cons);
604
605 /* Prefetch the page containing the BD descriptor
606 at the producer's index. It will be needed when a new skb is
607 allocated */
608 prefetch((void *)(PAGE_ALIGN((unsigned long)
609 (&fp->rx_desc_ring[bd_prod])) -
610 PAGE_SIZE + 1));
611
612 cqe = &fp->rx_comp_ring[comp_ring_cons];
619c5cb6
VZ
613 cqe_fp = &cqe->fast_path_cqe;
614 cqe_fp_flags = cqe_fp->type_error_flags;
615 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
9f6c9258
DK
616
617 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
618 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
619c5cb6
VZ
619 cqe_fp_flags, cqe_fp->status_flags,
620 le32_to_cpu(cqe_fp->rss_hash_result),
621 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
9f6c9258
DK
622
623 /* is this a slowpath msg? */
619c5cb6 624 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
9f6c9258
DK
625 bnx2x_sp_event(fp, cqe);
626 goto next_cqe;
627
628 /* this is an rx packet */
629 } else {
630 rx_buf = &fp->rx_buf_ring[bd_cons];
631 skb = rx_buf->skb;
632 prefetch(skb);
9f6c9258 633
619c5cb6
VZ
634 if (!CQE_TYPE_FAST(cqe_fp_type)) {
635#ifdef BNX2X_STOP_ON_ERROR
636 /* sanity check */
637 if (fp->disable_tpa &&
638 (CQE_TYPE_START(cqe_fp_type) ||
639 CQE_TYPE_STOP(cqe_fp_type)))
640 BNX2X_ERR("START/STOP packet while "
641 "disable_tpa type %x\n",
642 CQE_TYPE(cqe_fp_type));
643#endif
9f6c9258 644
619c5cb6
VZ
645 if (CQE_TYPE_START(cqe_fp_type)) {
646 u16 queue = cqe_fp->queue_index;
9f6c9258
DK
647 DP(NETIF_MSG_RX_STATUS,
648 "calling tpa_start on queue %d\n",
649 queue);
650
651 bnx2x_tpa_start(fp, queue, skb,
619c5cb6
VZ
652 bd_cons, bd_prod,
653 cqe_fp);
9f6c9258 654
619c5cb6 655 /* Set Toeplitz hash for LRO skb */
9f6c9258
DK
656 bnx2x_set_skb_rxhash(bp, cqe, skb);
657
658 goto next_rx;
619c5cb6
VZ
659
660 } else {
661 u16 queue =
662 cqe->end_agg_cqe.queue_index;
9f6c9258
DK
663 DP(NETIF_MSG_RX_STATUS,
664 "calling tpa_stop on queue %d\n",
665 queue);
666
619c5cb6
VZ
667 bnx2x_tpa_stop(bp, fp, queue,
668 &cqe->end_agg_cqe,
669 comp_ring_cons);
9f6c9258
DK
670#ifdef BNX2X_STOP_ON_ERROR
671 if (bp->panic)
672 return 0;
673#endif
674
619c5cb6 675 bnx2x_update_sge_prod(fp, cqe_fp);
9f6c9258
DK
676 goto next_cqe;
677 }
678 }
619c5cb6
VZ
679 /* non TPA */
680 len = le16_to_cpu(cqe_fp->pkt_len);
681 pad = cqe_fp->placement_offset;
9924cafc 682 dma_sync_single_for_cpu(&bp->pdev->dev,
9f6c9258 683 dma_unmap_addr(rx_buf, mapping),
619c5cb6
VZ
684 pad + RX_COPY_THRESH,
685 DMA_FROM_DEVICE);
217de5aa 686 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
9f6c9258
DK
687
688 /* is this an error packet? */
689 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
690 DP(NETIF_MSG_RX_ERR,
691 "ERROR flags %x rx packet %u\n",
692 cqe_fp_flags, sw_comp_cons);
693 fp->eth_q_stats.rx_err_discard_pkt++;
694 goto reuse_rx;
695 }
696
697 /* Since we don't have a jumbo ring
698 * copy small packets if mtu > 1500
699 */
700 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
701 (len <= RX_COPY_THRESH)) {
702 struct sk_buff *new_skb;
703
619c5cb6 704 new_skb = netdev_alloc_skb(bp->dev, len + pad);
9f6c9258
DK
705 if (new_skb == NULL) {
706 DP(NETIF_MSG_RX_ERR,
707 "ERROR packet dropped "
708 "because of alloc failure\n");
709 fp->eth_q_stats.rx_skb_alloc_failed++;
710 goto reuse_rx;
711 }
712
713 /* aligned copy */
714 skb_copy_from_linear_data_offset(skb, pad,
715 new_skb->data + pad, len);
716 skb_reserve(new_skb, pad);
717 skb_put(new_skb, len);
718
749a8503 719 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
9f6c9258
DK
720
721 skb = new_skb;
722
723 } else
724 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
725 dma_unmap_single(&bp->pdev->dev,
726 dma_unmap_addr(rx_buf, mapping),
a8c94b91 727 fp->rx_buf_size,
9f6c9258
DK
728 DMA_FROM_DEVICE);
729 skb_reserve(skb, pad);
730 skb_put(skb, len);
731
732 } else {
733 DP(NETIF_MSG_RX_ERR,
734 "ERROR packet dropped because "
735 "of alloc failure\n");
736 fp->eth_q_stats.rx_skb_alloc_failed++;
737reuse_rx:
749a8503 738 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
9f6c9258
DK
739 goto next_rx;
740 }
741
742 skb->protocol = eth_type_trans(skb, bp->dev);
743
744 /* Set Toeplitz hash for a non-LRO skb */
745 bnx2x_set_skb_rxhash(bp, cqe, skb);
746
bc8acf2c 747 skb_checksum_none_assert(skb);
f85582f8 748
66371c44 749 if (bp->dev->features & NETIF_F_RXCSUM) {
619c5cb6 750
9f6c9258
DK
751 if (likely(BNX2X_RX_CSUM_OK(cqe)))
752 skb->ip_summed = CHECKSUM_UNNECESSARY;
753 else
754 fp->eth_q_stats.hw_csum_err++;
755 }
756 }
757
f233cafe 758 skb_record_rx_queue(skb, fp->rx_queue);
9f6c9258 759
619c5cb6
VZ
760 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
761 PARSING_FLAGS_VLAN)
9bcc0893 762 __vlan_hwaccel_put_tag(skb,
619c5cb6 763 le16_to_cpu(cqe_fp->vlan_tag));
9bcc0893 764 napi_gro_receive(&fp->napi, skb);
9f6c9258
DK
765
766
767next_rx:
768 rx_buf->skb = NULL;
769
770 bd_cons = NEXT_RX_IDX(bd_cons);
771 bd_prod = NEXT_RX_IDX(bd_prod);
772 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
773 rx_pkt++;
774next_cqe:
775 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
776 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
777
778 if (rx_pkt == budget)
779 break;
780 } /* while */
781
782 fp->rx_bd_cons = bd_cons;
783 fp->rx_bd_prod = bd_prod_fw;
784 fp->rx_comp_cons = sw_comp_cons;
785 fp->rx_comp_prod = sw_comp_prod;
786
787 /* Update producers */
788 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
789 fp->rx_sge_prod);
790
791 fp->rx_pkt += rx_pkt;
792 fp->rx_calls++;
793
794 return rx_pkt;
795}
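
/*
 * Editor's illustrative sketch (not part of the driver): the small-packet
 * "copybreak" idea used in the Rx path above when mtu > 1500 -- copy short
 * frames into a freshly allocated skb and let the caller recycle the
 * original receive buffer instead of handing it to the stack.  Names are
 * hypothetical and error accounting is omitted.
 */
static struct sk_buff *example_copybreak(struct net_device *dev,
					 struct sk_buff *rx_skb,
					 u16 len, u16 pad)
{
	struct sk_buff *copy = netdev_alloc_skb(dev, len + pad);

	if (!copy)
		return NULL;		/* caller reuses rx_skb as-is */

	skb_reserve(copy, pad);
	memcpy(skb_put(copy, len), rx_skb->data + pad, len);

	return copy;			/* caller re-posts rx_skb's buffer */
}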
796
797static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
798{
799 struct bnx2x_fastpath *fp = fp_cookie;
800 struct bnx2x *bp = fp->bp;
6383c0b3 801 u8 cos;
9f6c9258 802
523224a3
DK
803 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
804 "[fp %d fw_sd %d igusb %d]\n",
805 fp->index, fp->fw_sb_id, fp->igu_sb_id);
806 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
9f6c9258
DK
807
808#ifdef BNX2X_STOP_ON_ERROR
809 if (unlikely(bp->panic))
810 return IRQ_HANDLED;
811#endif
812
813 /* Handle Rx and Tx according to MSI-X vector */
814 prefetch(fp->rx_cons_sb);
6383c0b3
AE
815
816 for_each_cos_in_tx_queue(fp, cos)
817 prefetch(fp->txdata[cos].tx_cons_sb);
818
523224a3 819 prefetch(&fp->sb_running_index[SM_RX_ID]);
9f6c9258
DK
820 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
821
822 return IRQ_HANDLED;
823}
824
9f6c9258
DK
825/* HW Lock for shared dual port PHYs */
826void bnx2x_acquire_phy_lock(struct bnx2x *bp)
827{
828 mutex_lock(&bp->port.phy_mutex);
829
830 if (bp->port.need_hw_lock)
831 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
832}
833
834void bnx2x_release_phy_lock(struct bnx2x *bp)
835{
836 if (bp->port.need_hw_lock)
837 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
838
839 mutex_unlock(&bp->port.phy_mutex);
840}
841
0793f83f
DK
842/* calculates MF speed according to current linespeed and MF configuration */
843u16 bnx2x_get_mf_speed(struct bnx2x *bp)
844{
845 u16 line_speed = bp->link_vars.line_speed;
846 if (IS_MF(bp)) {
faa6fcbb
DK
847 u16 maxCfg = bnx2x_extract_max_cfg(bp,
848 bp->mf_config[BP_VN(bp)]);
849
850 /* Calculate the current MAX line speed limit for the MF
851 * devices
0793f83f 852 */
faa6fcbb
DK
853 if (IS_MF_SI(bp))
854 line_speed = (line_speed * maxCfg) / 100;
855 else { /* SD mode */
0793f83f
DK
856 u16 vn_max_rate = maxCfg * 100;
857
858 if (vn_max_rate < line_speed)
859 line_speed = vn_max_rate;
faa6fcbb 860 }
0793f83f
DK
861 }
862
863 return line_speed;
864}
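
/*
 * Editor's worked example (illustrative): with a physical line speed of
 * 10000 Mbps and maxCfg = 25, the function above reports
 * 10000 * 25 / 100 = 2500 Mbps in SI mode, and min(10000, 25 * 100) =
 * 2500 Mbps in SD mode, where maxCfg is a rate in units of 100 Mbps
 * rather than a percentage.
 */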
865
2ae17f66
VZ
866/**
867 * bnx2x_fill_report_data - fill link report data to report
868 *
869 * @bp: driver handle
870 * @data: link state to update
871 *
872 * It uses non-atomic bit operations because it is called under the mutex.
873 */
874static inline void bnx2x_fill_report_data(struct bnx2x *bp,
875 struct bnx2x_link_report_data *data)
876{
877 u16 line_speed = bnx2x_get_mf_speed(bp);
878
879 memset(data, 0, sizeof(*data));
880
881 /* Fill the report data: effective line speed */
882 data->line_speed = line_speed;
883
884 /* Link is down */
885 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
886 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
887 &data->link_report_flags);
888
889 /* Full DUPLEX */
890 if (bp->link_vars.duplex == DUPLEX_FULL)
891 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
892
893 /* Rx Flow Control is ON */
894 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
895 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
896
897 /* Tx Flow Control is ON */
898 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
899 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
900}
901
902/**
903 * bnx2x_link_report - report link status to OS.
904 *
905 * @bp: driver handle
906 *
907 * Calls the __bnx2x_link_report() under the same locking scheme
908 * as a link/PHY state managing code to ensure a consistent link
909 * reporting.
910 */
911
9f6c9258
DK
912void bnx2x_link_report(struct bnx2x *bp)
913{
2ae17f66
VZ
914 bnx2x_acquire_phy_lock(bp);
915 __bnx2x_link_report(bp);
916 bnx2x_release_phy_lock(bp);
917}
9f6c9258 918
2ae17f66
VZ
919/**
920 * __bnx2x_link_report - report link status to OS.
921 *
922 * @bp: driver handle
923 *
924 * Non-atomic implementation.
925 * Should be called under the phy_lock.
926 */
927void __bnx2x_link_report(struct bnx2x *bp)
928{
929 struct bnx2x_link_report_data cur_data;
9f6c9258 930
2ae17f66
VZ
931 /* reread mf_cfg */
932 if (!CHIP_IS_E1(bp))
933 bnx2x_read_mf_cfg(bp);
934
935 /* Read the current link report info */
936 bnx2x_fill_report_data(bp, &cur_data);
937
938 /* Don't report link down or exactly the same link status twice */
939 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
940 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
941 &bp->last_reported_link.link_report_flags) &&
942 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
943 &cur_data.link_report_flags)))
944 return;
945
946 bp->link_cnt++;
9f6c9258 947
2ae17f66
VZ
948 /* We are going to report new link parameters now -
949 * remember the current data for next time.
950 */
951 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
9f6c9258 952
2ae17f66
VZ
953 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
954 &cur_data.link_report_flags)) {
955 netif_carrier_off(bp->dev);
956 netdev_err(bp->dev, "NIC Link is Down\n");
957 return;
958 } else {
94f05b0f
JP
959 const char *duplex;
960 const char *flow;
961
2ae17f66 962 netif_carrier_on(bp->dev);
9f6c9258 963
2ae17f66
VZ
964 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
965 &cur_data.link_report_flags))
94f05b0f 966 duplex = "full";
9f6c9258 967 else
94f05b0f 968 duplex = "half";
9f6c9258 969
2ae17f66
VZ
970 /* Handle the FC at the end so that only these flags could
971 * possibly be set. This way we can easily check whether any FC
972 * is enabled.
973 */
974 if (cur_data.link_report_flags) {
975 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
976 &cur_data.link_report_flags)) {
2ae17f66
VZ
977 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
978 &cur_data.link_report_flags))
94f05b0f
JP
979 flow = "ON - receive & transmit";
980 else
981 flow = "ON - receive";
9f6c9258 982 } else {
94f05b0f 983 flow = "ON - transmit";
9f6c9258 984 }
94f05b0f
JP
985 } else {
986 flow = "none";
9f6c9258 987 }
94f05b0f
JP
988 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
989 cur_data.line_speed, duplex, flow);
9f6c9258
DK
990 }
991}
992
993void bnx2x_init_rx_rings(struct bnx2x *bp)
994{
995 int func = BP_FUNC(bp);
523224a3 996 u16 ring_prod;
9f6c9258 997 int i, j;
25141580 998
b3b83c3f 999 /* Allocate TPA resources */
ec6ba945 1000 for_each_rx_queue(bp, j) {
523224a3 1001 struct bnx2x_fastpath *fp = &bp->fp[j];
9f6c9258 1002
a8c94b91
VZ
1003 DP(NETIF_MSG_IFUP,
1004 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1005
523224a3 1006 if (!fp->disable_tpa) {
619c5cb6 1007 /* Fill the per-aggregation pool */
dfacf138 1008 for (i = 0; i < MAX_AGG_QS(bp); i++) {
619c5cb6
VZ
1009 struct bnx2x_agg_info *tpa_info =
1010 &fp->tpa_info[i];
1011 struct sw_rx_bd *first_buf =
1012 &tpa_info->first_buf;
1013
1014 first_buf->skb = netdev_alloc_skb(bp->dev,
1015 fp->rx_buf_size);
1016 if (!first_buf->skb) {
9f6c9258
DK
1017 BNX2X_ERR("Failed to allocate TPA "
1018 "skb pool for queue[%d] - "
1019 "disabling TPA on this "
1020 "queue!\n", j);
1021 bnx2x_free_tpa_pool(bp, fp, i);
1022 fp->disable_tpa = 1;
1023 break;
1024 }
619c5cb6
VZ
1025 dma_unmap_addr_set(first_buf, mapping, 0);
1026 tpa_info->tpa_state = BNX2X_TPA_STOP;
9f6c9258 1027 }
523224a3
DK
1028
1029 /* "next page" elements initialization */
1030 bnx2x_set_next_page_sgl(fp);
1031
1032 /* set SGEs bit mask */
1033 bnx2x_init_sge_ring_bit_mask(fp);
1034
1035 /* Allocate SGEs and initialize the ring elements */
1036 for (i = 0, ring_prod = 0;
1037 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1038
1039 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1040 BNX2X_ERR("was only able to allocate "
1041 "%d rx sges\n", i);
619c5cb6
VZ
1042 BNX2X_ERR("disabling TPA for "
1043 "queue[%d]\n", j);
523224a3 1044 /* Cleanup already allocated elements */
619c5cb6
VZ
1045 bnx2x_free_rx_sge_range(bp, fp,
1046 ring_prod);
1047 bnx2x_free_tpa_pool(bp, fp,
dfacf138 1048 MAX_AGG_QS(bp));
523224a3
DK
1049 fp->disable_tpa = 1;
1050 ring_prod = 0;
1051 break;
1052 }
1053 ring_prod = NEXT_SGE_IDX(ring_prod);
1054 }
1055
1056 fp->rx_sge_prod = ring_prod;
9f6c9258
DK
1057 }
1058 }
1059
ec6ba945 1060 for_each_rx_queue(bp, j) {
9f6c9258
DK
1061 struct bnx2x_fastpath *fp = &bp->fp[j];
1062
1063 fp->rx_bd_cons = 0;
9f6c9258 1064
b3b83c3f
DK
1065 /* Activate BD ring */
1066 /* Warning!
1067 * this will generate an interrupt (to the TSTORM)
1068 * must only be done after chip is initialized
1069 */
1070 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1071 fp->rx_sge_prod);
9f6c9258 1072
9f6c9258
DK
1073 if (j != 0)
1074 continue;
1075
619c5cb6 1076 if (CHIP_IS_E1(bp)) {
f2e0899f
DK
1077 REG_WR(bp, BAR_USTRORM_INTMEM +
1078 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1079 U64_LO(fp->rx_comp_mapping));
1080 REG_WR(bp, BAR_USTRORM_INTMEM +
1081 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1082 U64_HI(fp->rx_comp_mapping));
1083 }
9f6c9258
DK
1084 }
1085}
f85582f8 1086
9f6c9258
DK
1087static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1088{
1089 int i;
6383c0b3 1090 u8 cos;
9f6c9258 1091
ec6ba945 1092 for_each_tx_queue(bp, i) {
9f6c9258 1093 struct bnx2x_fastpath *fp = &bp->fp[i];
6383c0b3
AE
1094 for_each_cos_in_tx_queue(fp, cos) {
1095 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
9f6c9258 1096
6383c0b3
AE
1097 u16 sw_prod = txdata->tx_pkt_prod;
1098 u16 sw_cons = txdata->tx_pkt_cons;
9f6c9258 1099
6383c0b3 1100 while (sw_cons != sw_prod) {
ad756594 1101 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons));
6383c0b3
AE
1102 sw_cons++;
1103 }
9f6c9258
DK
1104 }
1105 }
1106}
1107
b3b83c3f
DK
1108static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1109{
1110 struct bnx2x *bp = fp->bp;
1111 int i;
1112
1113 /* ring wasn't allocated */
1114 if (fp->rx_buf_ring == NULL)
1115 return;
1116
1117 for (i = 0; i < NUM_RX_BD; i++) {
1118 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1119 struct sk_buff *skb = rx_buf->skb;
1120
1121 if (skb == NULL)
1122 continue;
b3b83c3f
DK
1123 dma_unmap_single(&bp->pdev->dev,
1124 dma_unmap_addr(rx_buf, mapping),
1125 fp->rx_buf_size, DMA_FROM_DEVICE);
1126
1127 rx_buf->skb = NULL;
1128 dev_kfree_skb(skb);
1129 }
1130}
1131
9f6c9258
DK
1132static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1133{
b3b83c3f 1134 int j;
9f6c9258 1135
ec6ba945 1136 for_each_rx_queue(bp, j) {
9f6c9258
DK
1137 struct bnx2x_fastpath *fp = &bp->fp[j];
1138
b3b83c3f 1139 bnx2x_free_rx_bds(fp);
9f6c9258 1140
9f6c9258 1141 if (!fp->disable_tpa)
dfacf138 1142 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
9f6c9258
DK
1143 }
1144}
1145
1146void bnx2x_free_skbs(struct bnx2x *bp)
1147{
1148 bnx2x_free_tx_skbs(bp);
1149 bnx2x_free_rx_skbs(bp);
1150}
1151
e3835b99
DK
1152void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1153{
1154 /* load old values */
1155 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1156
1157 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1158 /* leave all but MAX value */
1159 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1160
1161 /* set new MAX value */
1162 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1163 & FUNC_MF_CFG_MAX_BW_MASK;
1164
1165 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1166 }
1167}
1168
ca92429f
DK
1169/**
1170 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1171 *
1172 * @bp: driver handle
1173 * @nvecs: number of vectors to be released
1174 */
1175static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
9f6c9258 1176{
ca92429f 1177 int i, offset = 0;
9f6c9258 1178
ca92429f
DK
1179 if (nvecs == offset)
1180 return;
1181 free_irq(bp->msix_table[offset].vector, bp->dev);
9f6c9258 1182 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
ca92429f
DK
1183 bp->msix_table[offset].vector);
1184 offset++;
9f6c9258 1185#ifdef BCM_CNIC
ca92429f
DK
1186 if (nvecs == offset)
1187 return;
9f6c9258
DK
1188 offset++;
1189#endif
ca92429f 1190
ec6ba945 1191 for_each_eth_queue(bp, i) {
ca92429f
DK
1192 if (nvecs == offset)
1193 return;
1194 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1195 "irq\n", i, bp->msix_table[offset].vector);
9f6c9258 1196
ca92429f 1197 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
9f6c9258
DK
1198 }
1199}
1200
d6214d7a 1201void bnx2x_free_irq(struct bnx2x *bp)
9f6c9258 1202{
d6214d7a 1203 if (bp->flags & USING_MSIX_FLAG)
ca92429f 1204 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
6383c0b3 1205 CNIC_PRESENT + 1);
d6214d7a
DK
1206 else if (bp->flags & USING_MSI_FLAG)
1207 free_irq(bp->pdev->irq, bp->dev);
1208 else
9f6c9258
DK
1209 free_irq(bp->pdev->irq, bp->dev);
1210}
1211
d6214d7a 1212int bnx2x_enable_msix(struct bnx2x *bp)
9f6c9258 1213{
d6214d7a 1214 int msix_vec = 0, i, rc, req_cnt;
9f6c9258 1215
d6214d7a
DK
1216 bp->msix_table[msix_vec].entry = msix_vec;
1217 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1218 bp->msix_table[0].entry);
1219 msix_vec++;
9f6c9258
DK
1220
1221#ifdef BCM_CNIC
d6214d7a
DK
1222 bp->msix_table[msix_vec].entry = msix_vec;
1223 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1224 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1225 msix_vec++;
9f6c9258 1226#endif
6383c0b3 1227 /* We need separate vectors for ETH queues only (not FCoE) */
ec6ba945 1228 for_each_eth_queue(bp, i) {
d6214d7a 1229 bp->msix_table[msix_vec].entry = msix_vec;
9f6c9258 1230 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
d6214d7a
DK
1231 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1232 msix_vec++;
9f6c9258
DK
1233 }
1234
6383c0b3 1235 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
d6214d7a
DK
1236
1237 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
9f6c9258
DK
1238
1239 /*
1240 * reconfigure number of tx/rx queues according to available
1241 * MSI-X vectors
1242 */
1243 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
d6214d7a
DK
1244 /* how many fewer vectors will we have? */
1245 int diff = req_cnt - rc;
9f6c9258
DK
1246
1247 DP(NETIF_MSG_IFUP,
1248 "Trying to use less MSI-X vectors: %d\n", rc);
1249
1250 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1251
1252 if (rc) {
1253 DP(NETIF_MSG_IFUP,
1254 "MSI-X is not attainable rc %d\n", rc);
1255 return rc;
1256 }
d6214d7a
DK
1257 /*
1258 * decrease number of queues by number of unallocated entries
1259 */
1260 bp->num_queues -= diff;
9f6c9258
DK
1261
1262 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1263 bp->num_queues);
1264 } else if (rc) {
d6214d7a
DK
1265 /* fall to INTx if not enough memory */
1266 if (rc == -ENOMEM)
1267 bp->flags |= DISABLE_MSI_FLAG;
9f6c9258
DK
1268 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1269 return rc;
1270 }
1271
1272 bp->flags |= USING_MSIX_FLAG;
1273
1274 return 0;
1275}
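
/*
 * Editor's illustrative sketch (not part of the driver): the "ask for N
 * vectors, then retry with what the platform can give us" pattern used by
 * bnx2x_enable_msix() above.  With the pci_enable_msix() of this kernel
 * generation, a positive return value is the number of vectors that could
 * actually be allocated.  Names are hypothetical and error handling is
 * reduced to the minimum.
 */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *tbl, int want)
{
	int rc = pci_enable_msix(pdev, tbl, want);

	if (rc > 0)	/* not enough vectors - retry with what is available */
		rc = pci_enable_msix(pdev, tbl, rc);

	return rc;	/* 0 on success, negative on failure */
}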
1276
1277static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1278{
ca92429f 1279 int i, rc, offset = 0;
9f6c9258 1280
ca92429f
DK
1281 rc = request_irq(bp->msix_table[offset++].vector,
1282 bnx2x_msix_sp_int, 0,
9f6c9258
DK
1283 bp->dev->name, bp->dev);
1284 if (rc) {
1285 BNX2X_ERR("request sp irq failed\n");
1286 return -EBUSY;
1287 }
1288
1289#ifdef BCM_CNIC
1290 offset++;
1291#endif
ec6ba945 1292 for_each_eth_queue(bp, i) {
9f6c9258
DK
1293 struct bnx2x_fastpath *fp = &bp->fp[i];
1294 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1295 bp->dev->name, i);
1296
d6214d7a 1297 rc = request_irq(bp->msix_table[offset].vector,
9f6c9258
DK
1298 bnx2x_msix_fp_int, 0, fp->name, fp);
1299 if (rc) {
ca92429f
DK
1300 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1301 bp->msix_table[offset].vector, rc);
1302 bnx2x_free_msix_irqs(bp, offset);
9f6c9258
DK
1303 return -EBUSY;
1304 }
1305
d6214d7a 1306 offset++;
9f6c9258
DK
1307 }
1308
ec6ba945 1309 i = BNX2X_NUM_ETH_QUEUES(bp);
6383c0b3 1310 offset = 1 + CNIC_PRESENT;
9f6c9258
DK
1311 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1312 " ... fp[%d] %d\n",
1313 bp->msix_table[0].vector,
1314 0, bp->msix_table[offset].vector,
1315 i - 1, bp->msix_table[offset + i - 1].vector);
1316
1317 return 0;
1318}
1319
d6214d7a 1320int bnx2x_enable_msi(struct bnx2x *bp)
9f6c9258
DK
1321{
1322 int rc;
1323
1324 rc = pci_enable_msi(bp->pdev);
1325 if (rc) {
1326 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1327 return -1;
1328 }
1329 bp->flags |= USING_MSI_FLAG;
1330
1331 return 0;
1332}
1333
1334static int bnx2x_req_irq(struct bnx2x *bp)
1335{
1336 unsigned long flags;
1337 int rc;
1338
1339 if (bp->flags & USING_MSI_FLAG)
1340 flags = 0;
1341 else
1342 flags = IRQF_SHARED;
1343
1344 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1345 bp->dev->name, bp->dev);
9f6c9258
DK
1346 return rc;
1347}
1348
619c5cb6
VZ
1349static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1350{
1351 int rc = 0;
1352 if (bp->flags & USING_MSIX_FLAG) {
1353 rc = bnx2x_req_msix_irqs(bp);
1354 if (rc)
1355 return rc;
1356 } else {
1357 bnx2x_ack_int(bp);
1358 rc = bnx2x_req_irq(bp);
1359 if (rc) {
1360 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1361 return rc;
1362 }
1363 if (bp->flags & USING_MSI_FLAG) {
1364 bp->dev->irq = bp->pdev->irq;
1365 netdev_info(bp->dev, "using MSI IRQ %d\n",
1366 bp->pdev->irq);
1367 }
1368 }
1369
1370 return 0;
1371}
1372
1373static inline void bnx2x_napi_enable(struct bnx2x *bp)
9f6c9258
DK
1374{
1375 int i;
1376
619c5cb6 1377 for_each_rx_queue(bp, i)
9f6c9258
DK
1378 napi_enable(&bnx2x_fp(bp, i, napi));
1379}
1380
619c5cb6 1381static inline void bnx2x_napi_disable(struct bnx2x *bp)
9f6c9258
DK
1382{
1383 int i;
1384
619c5cb6 1385 for_each_rx_queue(bp, i)
9f6c9258
DK
1386 napi_disable(&bnx2x_fp(bp, i, napi));
1387}
1388
1389void bnx2x_netif_start(struct bnx2x *bp)
1390{
4b7ed897
DK
1391 if (netif_running(bp->dev)) {
1392 bnx2x_napi_enable(bp);
1393 bnx2x_int_enable(bp);
1394 if (bp->state == BNX2X_STATE_OPEN)
1395 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1396 }
1397}
1398
1399void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1400{
1401 bnx2x_int_disable_sync(bp, disable_hw);
1402 bnx2x_napi_disable(bp);
9f6c9258 1403}
9f6c9258 1404
8307fa3e
VZ
1405u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1406{
8307fa3e 1407 struct bnx2x *bp = netdev_priv(dev);
cdb9d6ae 1408
faa28314 1409#ifdef BCM_CNIC
cdb9d6ae 1410 if (!NO_FCOE(bp)) {
8307fa3e
VZ
1411 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1412 u16 ether_type = ntohs(hdr->h_proto);
1413
1414 /* Skip VLAN tag if present */
1415 if (ether_type == ETH_P_8021Q) {
1416 struct vlan_ethhdr *vhdr =
1417 (struct vlan_ethhdr *)skb->data;
1418
1419 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1420 }
1421
1422 /* If ethertype is FCoE or FIP - use FCoE ring */
1423 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
6383c0b3 1424 return bnx2x_fcoe_tx(bp, txq_index);
8307fa3e
VZ
1425 }
1426#endif
cdb9d6ae 1427 /* select a non-FCoE queue */
6383c0b3 1428 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
8307fa3e
VZ
1429}
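
/*
 * Editor's illustrative sketch (not part of the driver): extracting the
 * ethertype of an outgoing frame while skipping a single VLAN tag -- the
 * same peek bnx2x_select_queue() above performs before steering FCoE/FIP
 * traffic to the dedicated ring.  The helper name is hypothetical.
 */
static inline u16 example_skb_ether_type(const struct sk_buff *skb)
{
	const struct ethhdr *eth = (const struct ethhdr *)skb->data;
	u16 ether_type = ntohs(eth->h_proto);

	if (ether_type == ETH_P_8021Q) {
		const struct vlan_ethhdr *veth =
			(const struct vlan_ethhdr *)skb->data;

		ether_type = ntohs(veth->h_vlan_encapsulated_proto);
	}

	return ether_type;
}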
1430
d6214d7a
DK
1431void bnx2x_set_num_queues(struct bnx2x *bp)
1432{
1433 switch (bp->multi_mode) {
1434 case ETH_RSS_MODE_DISABLED:
9f6c9258 1435 bp->num_queues = 1;
d6214d7a
DK
1436 break;
1437 case ETH_RSS_MODE_REGULAR:
1438 bp->num_queues = bnx2x_calc_num_queues(bp);
9f6c9258 1439 break;
f85582f8 1440
9f6c9258 1441 default:
d6214d7a 1442 bp->num_queues = 1;
9f6c9258
DK
1443 break;
1444 }
ec6ba945
VZ
1445
1446 /* Add special queues */
6383c0b3 1447 bp->num_queues += NON_ETH_CONTEXT_USE;
ec6ba945
VZ
1448}
1449
cdb9d6ae
VZ
1450/**
1451 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1452 *
1453 * @bp: Driver handle
1454 *
1455 * We currently support at most 16 Tx queues for each CoS, thus we will
1456 * allocate a multiple of 16 for ETH L2 rings, according to the value of
1457 * bp->max_cos.
1458 *
1459 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1460 * index after all ETH L2 indices.
1461 *
1462 * If the actual number of Tx queues (for each CoS) is less than 16 then there
1463 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1464 * 16..31,...) with indices that are not coupled with any real Tx queue.
1465 *
1466 * The proper configuration of skb->queue_mapping is handled by
1467 * bnx2x_select_queue() and __skb_tx_hash().
1468 *
1469 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1470 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1471 */
ec6ba945
VZ
1472static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1473{
6383c0b3 1474 int rc, tx, rx;
ec6ba945 1475
6383c0b3
AE
1476 tx = MAX_TXQS_PER_COS * bp->max_cos;
1477 rx = BNX2X_NUM_ETH_QUEUES(bp);
ec6ba945 1478
6383c0b3
AE
1479/* account for fcoe queue */
1480#ifdef BCM_CNIC
1481 if (!NO_FCOE(bp)) {
1482 rx += FCOE_PRESENT;
1483 tx += FCOE_PRESENT;
1484 }
ec6ba945 1485#endif
6383c0b3
AE
1486
1487 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1488 if (rc) {
1489 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1490 return rc;
1491 }
1492 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1493 if (rc) {
1494 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1495 return rc;
1496 }
1497
1498 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1499 tx, rx);
1500
ec6ba945
VZ
1501 return rc;
1502}
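
/*
 * Editor's worked example (illustrative): with bp->max_cos = 3 and 8 ETH
 * L2 rings, netdev Tx indices 0..7 belong to CoS 0, 16..23 to CoS 1 and
 * 32..39 to CoS 2; indices 8..15, 24..31 and 40..47 are the "holes"
 * described in the comment above, and an FCoE L2 queue (if present) takes
 * the next index after all the ETH L2 indices.
 */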
1503
a8c94b91
VZ
1504static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1505{
1506 int i;
1507
1508 for_each_queue(bp, i) {
1509 struct bnx2x_fastpath *fp = &bp->fp[i];
1510
1511 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1512 if (IS_FCOE_IDX(i))
1513 /*
1514 * Although there are no IP frames expected to arrive to
1515 * this ring we still want to add an
1516 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1517 * overrun attack.
1518 */
1519 fp->rx_buf_size =
1520 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
619c5cb6 1521 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
a8c94b91
VZ
1522 else
1523 fp->rx_buf_size =
619c5cb6
VZ
1524 bp->dev->mtu + ETH_OVREHEAD +
1525 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
a8c94b91
VZ
1526 }
1527}
1528
619c5cb6
VZ
1529static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1530{
1531 int i;
1532 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1533 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1534
1535 /*
1536 * Prepare the initial contents of the indirection table if RSS is
1537 * enabled
1538 */
1539 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1540 for (i = 0; i < sizeof(ind_table); i++)
1541 ind_table[i] =
1542 bp->fp->cl_id + (i % num_eth_queues);
1543 }
1544
1545 /*
1546 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1547 * per-port, so if explicit configuration is needed, do it only
1548 * for a PMF.
1549 *
1550 * For 57712 and newer on the other hand it's a per-function
1551 * configuration.
1552 */
1553 return bnx2x_config_rss_pf(bp, ind_table,
1554 bp->port.pmf || !CHIP_IS_E1x(bp));
1555}
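
/*
 * Editor's worked example (illustrative): with 4 ETH queues and a base
 * client id of, say, 16, the loop above fills the indirection table with
 * the repeating pattern 16, 17, 18, 19, 16, 17, ... so that RSS hash
 * buckets are spread evenly across the ETH clients.
 */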
1556
1557int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1558{
1559 struct bnx2x_config_rss_params params = {0};
1560 int i;
1561
1562 /* Although RSS is meaningless when there is a single HW queue we
1563 * still need it enabled in order to have HW Rx hash generated.
1564 *
1565 * if (!is_eth_multi(bp))
1566 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1567 */
1568
1569 params.rss_obj = &bp->rss_conf_obj;
1570
1571 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1572
1573 /* RSS mode */
1574 switch (bp->multi_mode) {
1575 case ETH_RSS_MODE_DISABLED:
1576 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1577 break;
1578 case ETH_RSS_MODE_REGULAR:
1579 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1580 break;
1581 case ETH_RSS_MODE_VLAN_PRI:
1582 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1583 break;
1584 case ETH_RSS_MODE_E1HOV_PRI:
1585 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1586 break;
1587 case ETH_RSS_MODE_IP_DSCP:
1588 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1589 break;
1590 default:
1591 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1592 return -EINVAL;
1593 }
1594
1595 /* If RSS is enabled */
1596 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1597 /* RSS configuration */
1598 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1599 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1600 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1601 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1602
1603 /* Hash bits */
1604 params.rss_result_mask = MULTI_MASK;
1605
1606 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1607
1608 if (config_hash) {
1609 /* RSS keys */
1610 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1611 params.rss_key[i] = random32();
1612
1613 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1614 }
1615 }
1616
1617 return bnx2x_config_rss(bp, &params);
1618}
1619
1620static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1621{
1622 struct bnx2x_func_state_params func_params = {0};
1623
1624 /* Prepare parameters for function state transitions */
1625 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1626
1627 func_params.f_obj = &bp->func_obj;
1628 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1629
1630 func_params.params.hw_init.load_phase = load_code;
1631
1632 return bnx2x_func_state_change(bp, &func_params);
1633}
1634
1635/*
1636 * Cleans the objects that have internal lists without sending
1637 * ramrods. Should be run when interrupts are disabled.
1638 */
1639static void bnx2x_squeeze_objects(struct bnx2x *bp)
1640{
1641 int rc;
1642 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1643 struct bnx2x_mcast_ramrod_params rparam = {0};
1644 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1645
1646 /***************** Cleanup MACs' object first *************************/
1647
1648 /* Wait for completion of the requested commands */
1649 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1650 /* Perform a dry cleanup */
1651 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1652
1653 /* Clean ETH primary MAC */
1654 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1655 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1656 &ramrod_flags);
1657 if (rc != 0)
1658 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1659
1660 /* Cleanup UC list */
1661 vlan_mac_flags = 0;
1662 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1663 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1664 &ramrod_flags);
1665 if (rc != 0)
1666 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1667
1668 /***************** Now clean mcast object *****************************/
1669 rparam.mcast_obj = &bp->mcast_obj;
1670 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1671
1672 /* Add a DEL command... */
1673 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1674 if (rc < 0)
1675 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1676 "object: %d\n", rc);
1677
1678 /* ...and wait until all pending commands are cleared */
1679 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1680 while (rc != 0) {
1681 if (rc < 0) {
1682 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1683 rc);
1684 return;
1685 }
1686
1687 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1688 }
1689}
1690
1691#ifndef BNX2X_STOP_ON_ERROR
1692#define LOAD_ERROR_EXIT(bp, label) \
1693 do { \
1694 (bp)->state = BNX2X_STATE_ERROR; \
1695 goto label; \
1696 } while (0)
1697#else
1698#define LOAD_ERROR_EXIT(bp, label) \
1699 do { \
1700 (bp)->state = BNX2X_STATE_ERROR; \
1701 (bp)->panic = 1; \
1702 return -EBUSY; \
1703 } while (0)
1704#endif
1705
9f6c9258
DK
1706/* must be called with rtnl_lock */
1707int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1708{
619c5cb6 1709 int port = BP_PORT(bp);
9f6c9258
DK
1710 u32 load_code;
1711 int i, rc;
1712
1713#ifdef BNX2X_STOP_ON_ERROR
1714 if (unlikely(bp->panic))
1715 return -EPERM;
1716#endif
1717
1718 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1719
2ae17f66
VZ
1720 /* Set the initial link reported state to link down */
1721 bnx2x_acquire_phy_lock(bp);
1722 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1723 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1724 &bp->last_reported_link.link_report_flags);
1725 bnx2x_release_phy_lock(bp);
1726
523224a3
DK
1727 /* must be called before memory allocation and HW init */
1728 bnx2x_ilt_set_info(bp);
1729
6383c0b3
AE
1730 /*
1731 * Zero the fastpath structures, preserving invariants such as the napi
1732 * object (allocated only once), the fp index, max_cos and the bp pointer.
1733 * Also set fp->disable_tpa.
b3b83c3f
DK
1734 */
1735 for_each_queue(bp, i)
1736 bnx2x_bz_fp(bp, i);
1737
6383c0b3 1738
a8c94b91
VZ
1739 /* Set the receive queues buffer size */
1740 bnx2x_set_rx_buf_size(bp);
1741
d6214d7a 1742 if (bnx2x_alloc_mem(bp))
9f6c9258 1743 return -ENOMEM;
d6214d7a 1744
b3b83c3f
DK
1745 /* As long as bnx2x_alloc_mem() may possibly update
1746 * bp->num_queues, bnx2x_set_real_num_queues() should always
1747 * come after it.
1748 */
ec6ba945 1749 rc = bnx2x_set_real_num_queues(bp);
d6214d7a 1750 if (rc) {
ec6ba945 1751 BNX2X_ERR("Unable to set real_num_queues\n");
619c5cb6 1752 LOAD_ERROR_EXIT(bp, load_error0);
9f6c9258
DK
1753 }
1754
6383c0b3
AE
1755 /* Configure multi-CoS mappings in the kernel.
1756 * This configuration may be overridden by a multi-class queue discipline
1757 * or by a DCBX negotiation result.
1758 */
1759 bnx2x_setup_tc(bp->dev, bp->max_cos);
1760
9f6c9258
DK
1761 bnx2x_napi_enable(bp);
1762
9f6c9258 1763 /* Send LOAD_REQUEST command to MCP
619c5cb6
VZ
1764 * Returns the type of LOAD command:
1765 * if it is the first port to be initialized
1766 * common blocks should be initialized, otherwise - not
1767 */
9f6c9258 1768 if (!BP_NOMCP(bp)) {
a22f0788 1769 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
9f6c9258
DK
1770 if (!load_code) {
1771 BNX2X_ERR("MCP response failure, aborting\n");
1772 rc = -EBUSY;
619c5cb6 1773 LOAD_ERROR_EXIT(bp, load_error1);
9f6c9258
DK
1774 }
1775 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1776 rc = -EBUSY; /* other port in diagnostic mode */
619c5cb6 1777 LOAD_ERROR_EXIT(bp, load_error1);
9f6c9258
DK
1778 }
1779
1780 } else {
f2e0899f 1781 int path = BP_PATH(bp);
9f6c9258 1782
f2e0899f
DK
1783 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1784 path, load_count[path][0], load_count[path][1],
1785 load_count[path][2]);
1786 load_count[path][0]++;
1787 load_count[path][1 + port]++;
1788 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1789 path, load_count[path][0], load_count[path][1],
1790 load_count[path][2]);
1791 if (load_count[path][0] == 1)
9f6c9258 1792 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
f2e0899f 1793 else if (load_count[path][1 + port] == 1)
9f6c9258
DK
1794 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1795 else
1796 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1797 }
1798
1799 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
f2e0899f 1800 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
3deb8167 1801 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
9f6c9258 1802 bp->port.pmf = 1;
3deb8167
YR
1803 /*
1804 * We need the barrier to ensure the ordering between the
1805 * writing to bp->port.pmf here and reading it from the
1806 * bnx2x_periodic_task().
1807 */
1808 smp_mb();
1809 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1810 } else
9f6c9258 1811 bp->port.pmf = 0;
6383c0b3 1812
9f6c9258
DK
1813 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1814
619c5cb6
VZ
1815 /* Init Function state controlling object */
1816 bnx2x__init_func_obj(bp);
1817
9f6c9258
DK
1818 /* Initialize HW */
1819 rc = bnx2x_init_hw(bp, load_code);
1820 if (rc) {
1821 BNX2X_ERR("HW init failed, aborting\n");
a22f0788 1822 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 1823 LOAD_ERROR_EXIT(bp, load_error2);
9f6c9258
DK
1824 }
1825
d6214d7a
DK
1826 /* Connect to IRQs */
1827 rc = bnx2x_setup_irqs(bp);
523224a3
DK
1828 if (rc) {
1829 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6 1830 LOAD_ERROR_EXIT(bp, load_error2);
523224a3
DK
1831 }
1832
9f6c9258
DK
1833 /* Setup NIC internals and enable interrupts */
1834 bnx2x_nic_init(bp, load_code);
1835
619c5cb6
VZ
1836 /* Init per-function objects */
1837 bnx2x_init_bp_objs(bp);
1838
f2e0899f
DK
1839 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1840 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
619c5cb6
VZ
1841 (bp->common.shmem2_base)) {
1842 if (SHMEM2_HAS(bp, dcc_support))
1843 SHMEM2_WR(bp, dcc_support,
1844 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1845 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1846 }
1847
1848 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1849 rc = bnx2x_func_start(bp);
1850 if (rc) {
1851 BNX2X_ERR("Function start failed!\n");
c636322b 1852 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
619c5cb6
VZ
1853 LOAD_ERROR_EXIT(bp, load_error3);
1854 }
9f6c9258
DK
1855
1856 /* Send LOAD_DONE command to MCP */
1857 if (!BP_NOMCP(bp)) {
a22f0788 1858 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9f6c9258
DK
1859 if (!load_code) {
1860 BNX2X_ERR("MCP response failure, aborting\n");
1861 rc = -EBUSY;
619c5cb6 1862 LOAD_ERROR_EXIT(bp, load_error3);
9f6c9258
DK
1863 }
1864 }
1865
619c5cb6 1866 rc = bnx2x_setup_leading(bp);
9f6c9258
DK
1867 if (rc) {
1868 BNX2X_ERR("Setup leading failed!\n");
619c5cb6 1869 LOAD_ERROR_EXIT(bp, load_error3);
f2e0899f 1870 }
9f6c9258 1871
9f6c9258 1872#ifdef BCM_CNIC
523224a3 1873 /* Enable Timer scan */
619c5cb6 1874 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
9f6c9258 1875#endif
f85582f8 1876
523224a3 1877 for_each_nondefault_queue(bp, i) {
619c5cb6 1878 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
523224a3 1879 if (rc)
619c5cb6 1880 LOAD_ERROR_EXIT(bp, load_error4);
523224a3
DK
1881 }
1882
619c5cb6
VZ
1883 rc = bnx2x_init_rss_pf(bp);
1884 if (rc)
1885 LOAD_ERROR_EXIT(bp, load_error4);
1886
523224a3
DK
1887 /* Now when Clients are configured we are ready to work */
1888 bp->state = BNX2X_STATE_OPEN;
1889
619c5cb6
VZ
1890 /* Configure a ucast MAC */
1891 rc = bnx2x_set_eth_mac(bp, true);
1892 if (rc)
1893 LOAD_ERROR_EXIT(bp, load_error4);
6e30dd4e 1894
e3835b99
DK
1895 if (bp->pending_max) {
1896 bnx2x_update_max_mf_config(bp, bp->pending_max);
1897 bp->pending_max = 0;
1898 }
1899
9f6c9258
DK
1900 if (bp->port.pmf)
1901 bnx2x_initial_phy_init(bp, load_mode);
1902
619c5cb6
VZ
1903 /* Start fast path */
1904
1905 /* Initialize Rx filter. */
1906 netif_addr_lock_bh(bp->dev);
6e30dd4e 1907 bnx2x_set_rx_mode(bp->dev);
619c5cb6 1908 netif_addr_unlock_bh(bp->dev);
6e30dd4e 1909
619c5cb6 1910 /* Start the Tx */
9f6c9258
DK
1911 switch (load_mode) {
1912 case LOAD_NORMAL:
523224a3
DK
 1913		/* Tx queues should only be re-enabled */
1914 netif_tx_wake_all_queues(bp->dev);
9f6c9258
DK
1915 break;
1916
1917 case LOAD_OPEN:
1918 netif_tx_start_all_queues(bp->dev);
523224a3 1919 smp_mb__after_clear_bit();
9f6c9258
DK
1920 break;
1921
1922 case LOAD_DIAG:
9f6c9258
DK
1923 bp->state = BNX2X_STATE_DIAG;
1924 break;
1925
1926 default:
1927 break;
1928 }
1929
1930 if (!bp->port.pmf)
1931 bnx2x__link_status_update(bp);
1932
1933 /* start the timer */
1934 mod_timer(&bp->timer, jiffies + bp->current_interval);
1935
1936#ifdef BCM_CNIC
1937 bnx2x_setup_cnic_irq_info(bp);
1938 if (bp->state == BNX2X_STATE_OPEN)
1939 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1940#endif
1941 bnx2x_inc_load_cnt(bp);
1942
619c5cb6
VZ
1943 /* Wait for all pending SP commands to complete */
1944 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1945 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1946 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1947 return -EBUSY;
1948 }
6891dd25 1949
619c5cb6 1950 bnx2x_dcbx_init(bp);
9f6c9258
DK
1951 return 0;
1952
619c5cb6 1953#ifndef BNX2X_STOP_ON_ERROR
9f6c9258 1954load_error4:
619c5cb6 1955#ifdef BCM_CNIC
9f6c9258 1956 /* Disable Timer scan */
619c5cb6 1957 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
9f6c9258
DK
1958#endif
1959load_error3:
1960 bnx2x_int_disable_sync(bp, 1);
d6214d7a 1961
619c5cb6
VZ
1962 /* Clean queueable objects */
1963 bnx2x_squeeze_objects(bp);
1964
9f6c9258
DK
1965 /* Free SKBs, SGEs, TPA pool and driver internals */
1966 bnx2x_free_skbs(bp);
ec6ba945 1967 for_each_rx_queue(bp, i)
9f6c9258 1968 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 1969
9f6c9258 1970 /* Release IRQs */
d6214d7a
DK
1971 bnx2x_free_irq(bp);
1972load_error2:
1973 if (!BP_NOMCP(bp)) {
1974 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1975 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1976 }
1977
1978 bp->port.pmf = 0;
9f6c9258
DK
1979load_error1:
1980 bnx2x_napi_disable(bp);
d6214d7a 1981load_error0:
9f6c9258
DK
1982 bnx2x_free_mem(bp);
1983
1984 return rc;
619c5cb6 1985#endif /* ! BNX2X_STOP_ON_ERROR */
9f6c9258
DK
1986}
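/* The load_error4..load_error0 labels above unwind bnx2x_nic_load() in
 * reverse order of initialization: disable the CNIC timer scan, sync and
 * disable interrupts, clean the queueable objects, free SKBs and SGEs,
 * release the IRQs, report UNLOAD to the MCP, disable NAPI and finally
 * free the memory allocated by bnx2x_alloc_mem().  The whole error path
 * is compiled out when BNX2X_STOP_ON_ERROR is defined, so a failed load
 * leaves the state intact for debugging.
 */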
1987
1988/* must be called with rtnl_lock */
1989int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1990{
1991 int i;
c9ee9206
VZ
1992 bool global = false;
1993
1994 if ((bp->state == BNX2X_STATE_CLOSED) ||
1995 (bp->state == BNX2X_STATE_ERROR)) {
1996 /* We can get here if the driver has been unloaded
1997 * during parity error recovery and is either waiting for a
1998 * leader to complete or for other functions to unload and
1999 * then ifdown has been issued. In this case we want to
 2000		 * unload and let other functions complete a recovery
2001 * process.
2002 */
9f6c9258
DK
2003 bp->recovery_state = BNX2X_RECOVERY_DONE;
2004 bp->is_leader = 0;
c9ee9206
VZ
2005 bnx2x_release_leader_lock(bp);
2006 smp_mb();
2007
2008 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
9f6c9258
DK
2009
2010 return -EINVAL;
2011 }
2012
87b7ba3d
VZ
2013 /*
2014 * It's important to set the bp->state to the value different from
2015 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2016 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2017 */
2018 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2019 smp_mb();
2020
9505ee37
VZ
2021 /* Stop Tx */
2022 bnx2x_tx_disable(bp);
2023
9f6c9258
DK
2024#ifdef BCM_CNIC
2025 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2026#endif
9f6c9258 2027
9f6c9258 2028 bp->rx_mode = BNX2X_RX_MODE_NONE;
9f6c9258 2029
9f6c9258 2030 del_timer_sync(&bp->timer);
f85582f8 2031
619c5cb6
VZ
2032 /* Set ALWAYS_ALIVE bit in shmem */
2033 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2034
2035 bnx2x_drv_pulse(bp);
9f6c9258 2036
f85582f8 2037 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9f6c9258
DK
2038
2039 /* Cleanup the chip if needed */
2040 if (unload_mode != UNLOAD_RECOVERY)
2041 bnx2x_chip_cleanup(bp, unload_mode);
523224a3 2042 else {
c9ee9206
VZ
2043 /* Send the UNLOAD_REQUEST to the MCP */
2044 bnx2x_send_unload_req(bp, unload_mode);
2045
2046 /*
 2047		 * Prevent transactions to the host from the functions on the
 2048		 * engine that doesn't reset global blocks in case of a global
 2049		 * attention, once global blocks are reset and gates are opened
 2050		 * (the engine whose leader will perform the recovery
 2051		 * last).
2052 */
2053 if (!CHIP_IS_E1x(bp))
2054 bnx2x_pf_disable(bp);
2055
2056 /* Disable HW interrupts, NAPI */
523224a3
DK
2057 bnx2x_netif_stop(bp, 1);
2058
2059 /* Release IRQs */
d6214d7a 2060 bnx2x_free_irq(bp);
c9ee9206
VZ
2061
2062 /* Report UNLOAD_DONE to MCP */
2063 bnx2x_send_unload_done(bp);
523224a3 2064 }
9f6c9258 2065
619c5cb6
VZ
2066 /*
 2067	 * At this stage no more interrupts will arrive, so we may safely clean
2068 * the queueable objects here in case they failed to get cleaned so far.
2069 */
2070 bnx2x_squeeze_objects(bp);
2071
79616895
VZ
2072 /* There should be no more pending SP commands at this stage */
2073 bp->sp_state = 0;
2074
9f6c9258
DK
2075 bp->port.pmf = 0;
2076
2077 /* Free SKBs, SGEs, TPA pool and driver internals */
2078 bnx2x_free_skbs(bp);
ec6ba945 2079 for_each_rx_queue(bp, i)
9f6c9258 2080 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 2081
9f6c9258
DK
2082 bnx2x_free_mem(bp);
2083
2084 bp->state = BNX2X_STATE_CLOSED;
2085
c9ee9206
VZ
2086 /* Check if there are pending parity attentions. If there are - set
2087 * RECOVERY_IN_PROGRESS.
2088 */
2089 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2090 bnx2x_set_reset_in_progress(bp);
2091
2092 /* Set RESET_IS_GLOBAL if needed */
2093 if (global)
2094 bnx2x_set_reset_global(bp);
2095 }
2096
2097
9f6c9258
DK
2098 /* The last driver must disable a "close the gate" if there is no
2099 * parity attention or "process kill" pending.
2100 */
c9ee9206 2101 if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
9f6c9258
DK
2102 bnx2x_disable_close_the_gate(bp);
2103
9f6c9258
DK
2104 return 0;
2105}
f85582f8 2106
9f6c9258
DK
2107int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2108{
2109 u16 pmcsr;
2110
adf5f6a1
DK
2111 /* If there is no power capability, silently succeed */
2112 if (!bp->pm_cap) {
2113 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2114 return 0;
2115 }
2116
9f6c9258
DK
2117 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2118
2119 switch (state) {
2120 case PCI_D0:
2121 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2122 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2123 PCI_PM_CTRL_PME_STATUS));
2124
2125 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2126 /* delay required during transition out of D3hot */
2127 msleep(20);
2128 break;
2129
2130 case PCI_D3hot:
 2131		/* If there are other clients above, don't
2132 shut down the power */
2133 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2134 return 0;
2135 /* Don't shut down the power for emulation and FPGA */
2136 if (CHIP_REV_IS_SLOW(bp))
2137 return 0;
2138
2139 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2140 pmcsr |= 3;
2141
2142 if (bp->wol)
2143 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2144
2145 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2146 pmcsr);
2147
2148 /* No more memory access after this point until
2149 * device is brought back to D0.
2150 */
2151 break;
2152
2153 default:
2154 return -EINVAL;
2155 }
2156 return 0;
2157}
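/* Illustrative usage sketch (mirrors bnx2x_suspend()/bnx2x_resume() further
 * below in this file; error handling elided):
 *
 *	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
 *	...
 *	bnx2x_set_power_state(bp, PCI_D0);
 *
 * Note that D3hot is only entered when this function is the last enabled
 * user of the PCI device and the chip is not an emulation/FPGA platform.
 */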
2158
9f6c9258
DK
2159/*
2160 * net_device service functions
2161 */
d6214d7a 2162int bnx2x_poll(struct napi_struct *napi, int budget)
9f6c9258
DK
2163{
2164 int work_done = 0;
6383c0b3 2165 u8 cos;
9f6c9258
DK
2166 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2167 napi);
2168 struct bnx2x *bp = fp->bp;
2169
2170 while (1) {
2171#ifdef BNX2X_STOP_ON_ERROR
2172 if (unlikely(bp->panic)) {
2173 napi_complete(napi);
2174 return 0;
2175 }
2176#endif
2177
6383c0b3
AE
2178 for_each_cos_in_tx_queue(fp, cos)
2179 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2180 bnx2x_tx_int(bp, &fp->txdata[cos]);
2181
9f6c9258
DK
2182
2183 if (bnx2x_has_rx_work(fp)) {
2184 work_done += bnx2x_rx_int(fp, budget - work_done);
2185
2186 /* must not complete if we consumed full budget */
2187 if (work_done >= budget)
2188 break;
2189 }
2190
2191 /* Fall out from the NAPI loop if needed */
2192 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
ec6ba945
VZ
2193#ifdef BCM_CNIC
2194 /* No need to update SB for FCoE L2 ring as long as
2195 * it's connected to the default SB and the SB
2196 * has been updated when NAPI was scheduled.
2197 */
2198 if (IS_FCOE_FP(fp)) {
2199 napi_complete(napi);
2200 break;
2201 }
2202#endif
2203
9f6c9258 2204 bnx2x_update_fpsb_idx(fp);
f85582f8
DK
2205 /* bnx2x_has_rx_work() reads the status block,
2206 * thus we need to ensure that status block indices
2207 * have been actually read (bnx2x_update_fpsb_idx)
2208 * prior to this check (bnx2x_has_rx_work) so that
2209 * we won't write the "newer" value of the status block
2210 * to IGU (if there was a DMA right after
2211 * bnx2x_has_rx_work and if there is no rmb, the memory
2212 * reading (bnx2x_update_fpsb_idx) may be postponed
2213 * to right before bnx2x_ack_sb). In this case there
2214 * will never be another interrupt until there is
2215 * another update of the status block, while there
2216 * is still unhandled work.
2217 */
9f6c9258
DK
2218 rmb();
2219
2220 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2221 napi_complete(napi);
2222 /* Re-enable interrupts */
523224a3
DK
2223 DP(NETIF_MSG_HW,
2224 "Update index to %d\n", fp->fp_hc_idx);
2225 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2226 le16_to_cpu(fp->fp_hc_idx),
9f6c9258
DK
2227 IGU_INT_ENABLE, 1);
2228 break;
2229 }
2230 }
2231 }
2232
2233 return work_done;
2234}
2235
9f6c9258
DK
2236/* We split the first BD into headers and data BDs
2237 * to ease the pain of our fellow microcode engineers;
2238 * we use one mapping for both BDs.
2239 * So far this has only been observed to happen
2240 * in Other Operating Systems(TM).
2241 */
2242static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
6383c0b3 2243 struct bnx2x_fp_txdata *txdata,
9f6c9258
DK
2244 struct sw_tx_bd *tx_buf,
2245 struct eth_tx_start_bd **tx_bd, u16 hlen,
2246 u16 bd_prod, int nbd)
2247{
2248 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2249 struct eth_tx_bd *d_tx_bd;
2250 dma_addr_t mapping;
2251 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2252
2253 /* first fix first BD */
2254 h_tx_bd->nbd = cpu_to_le16(nbd);
2255 h_tx_bd->nbytes = cpu_to_le16(hlen);
2256
2257 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2258 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2259 h_tx_bd->addr_lo, h_tx_bd->nbd);
2260
2261 /* now get a new data BD
2262 * (after the pbd) and fill it */
2263 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 2264 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258
DK
2265
2266 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2267 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2268
2269 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2270 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2271 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2272
2273 /* this marks the BD as one that has no individual mapping */
2274 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2275
2276 DP(NETIF_MSG_TX_QUEUED,
2277 "TSO split data size is %d (%x:%x)\n",
2278 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2279
2280 /* update tx_bd */
2281 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2282
2283 return bd_prod;
2284}
2285
2286static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2287{
2288 if (fix > 0)
2289 csum = (u16) ~csum_fold(csum_sub(csum,
2290 csum_partial(t_header - fix, fix, 0)));
2291
2292 else if (fix < 0)
2293 csum = (u16) ~csum_fold(csum_add(csum,
2294 csum_partial(t_header, -fix, 0)));
2295
2296 return swab16(csum);
2297}
2298
2299static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2300{
2301 u32 rc;
2302
2303 if (skb->ip_summed != CHECKSUM_PARTIAL)
2304 rc = XMIT_PLAIN;
2305
2306 else {
d0d9d8ef 2307 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
9f6c9258
DK
2308 rc = XMIT_CSUM_V6;
2309 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2310 rc |= XMIT_CSUM_TCP;
2311
2312 } else {
2313 rc = XMIT_CSUM_V4;
2314 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2315 rc |= XMIT_CSUM_TCP;
2316 }
2317 }
2318
5892b9e9
VZ
2319 if (skb_is_gso_v6(skb))
2320 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2321 else if (skb_is_gso(skb))
2322 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
9f6c9258
DK
2323
2324 return rc;
2325}
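/* Illustrative sketch (not a separate driver path): callers only test the
 * composite masks of the value returned above, e.g.
 *
 *	u32 xmit_type = bnx2x_xmit_type(bp, skb);
 *
 *	if (xmit_type & XMIT_CSUM)
 *		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
 *	if (xmit_type & XMIT_GSO)
 *		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
 *
 * which is the pattern bnx2x_start_xmit() follows below.
 */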
2326
2327#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2328/* check if packet requires linearization (packet is too fragmented);
2329 no need to check fragmentation if page size > 8K (there will be no
2330 violation of FW restrictions) */
2331static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2332 u32 xmit_type)
2333{
2334 int to_copy = 0;
2335 int hlen = 0;
2336 int first_bd_sz = 0;
2337
2338 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2339 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2340
2341 if (xmit_type & XMIT_GSO) {
2342 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2343 /* Check if LSO packet needs to be copied:
2344 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2345 int wnd_size = MAX_FETCH_BD - 3;
2346 /* Number of windows to check */
2347 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2348 int wnd_idx = 0;
2349 int frag_idx = 0;
2350 u32 wnd_sum = 0;
2351
2352 /* Headers length */
2353 hlen = (int)(skb_transport_header(skb) - skb->data) +
2354 tcp_hdrlen(skb);
2355
2356 /* Amount of data (w/o headers) on linear part of SKB*/
2357 first_bd_sz = skb_headlen(skb) - hlen;
2358
2359 wnd_sum = first_bd_sz;
2360
2361 /* Calculate the first sum - it's special */
2362 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2363 wnd_sum +=
9e903e08 2364 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
9f6c9258
DK
2365
2366 /* If there was data on linear skb data - check it */
2367 if (first_bd_sz > 0) {
2368 if (unlikely(wnd_sum < lso_mss)) {
2369 to_copy = 1;
2370 goto exit_lbl;
2371 }
2372
2373 wnd_sum -= first_bd_sz;
2374 }
2375
2376 /* Others are easier: run through the frag list and
2377 check all windows */
2378 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2379 wnd_sum +=
9e903e08 2380 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
9f6c9258
DK
2381
2382 if (unlikely(wnd_sum < lso_mss)) {
2383 to_copy = 1;
2384 break;
2385 }
2386 wnd_sum -=
9e903e08 2387 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
9f6c9258
DK
2388 }
2389 } else {
 2390			/* in the non-LSO case, a too fragmented packet should always
2391 be linearized */
2392 to_copy = 1;
2393 }
2394 }
2395
2396exit_lbl:
2397 if (unlikely(to_copy))
2398 DP(NETIF_MSG_TX_QUEUED,
2399 "Linearization IS REQUIRED for %s packet. "
2400 "num_frags %d hlen %d first_bd_sz %d\n",
2401 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2402 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2403
2404 return to_copy;
2405}
2406#endif
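/* Worked example for the window check above (numbers are illustrative;
 * MAX_FETCH_BD itself is defined elsewhere in the driver): if
 * MAX_FETCH_BD were 13, then wnd_size = 10 and every window of 10
 * consecutive data BDs must together carry at least gso_size bytes.
 * An LSO skb with gso_size = 1460 and a dozen 100-byte frags would fail
 * the very first window (10 * 100 < 1460) and therefore be linearized.
 */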
2407
2297a2da
VZ
2408static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2409 u32 xmit_type)
f2e0899f 2410{
2297a2da
VZ
2411 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2412 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2413 ETH_TX_PARSE_BD_E2_LSO_MSS;
f2e0899f
DK
2414 if ((xmit_type & XMIT_GSO_V6) &&
2415 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2297a2da 2416 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
f2e0899f
DK
2417}
2418
2419/**
e8920674 2420 * bnx2x_set_pbd_gso - update PBD in GSO case.
f2e0899f 2421 *
e8920674
DK
2422 * @skb: packet skb
2423 * @pbd: parse BD
2424 * @xmit_type: xmit flags
f2e0899f
DK
2425 */
2426static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2427 struct eth_tx_parse_bd_e1x *pbd,
2428 u32 xmit_type)
2429{
2430 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2431 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2432 pbd->tcp_flags = pbd_tcp_flags(skb);
2433
2434 if (xmit_type & XMIT_GSO_V4) {
2435 pbd->ip_id = swab16(ip_hdr(skb)->id);
2436 pbd->tcp_pseudo_csum =
2437 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2438 ip_hdr(skb)->daddr,
2439 0, IPPROTO_TCP, 0));
2440
2441 } else
2442 pbd->tcp_pseudo_csum =
2443 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2444 &ipv6_hdr(skb)->daddr,
2445 0, IPPROTO_TCP, 0));
2446
2447 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2448}
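/* The pseudo-header checksums above are deliberately computed with a zero
 * length, and ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN is set, which is
 * the usual TSO arrangement: the firmware is expected to patch in the
 * per-segment length when it re-segments the packet (an assumption based
 * on the flag name; the firmware side is not visible in this file).
 */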
f85582f8 2449
f2e0899f 2450/**
e8920674 2451 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
f2e0899f 2452 *
e8920674
DK
2453 * @bp: driver handle
2454 * @skb: packet skb
2455 * @parsing_data: data to be updated
2456 * @xmit_type: xmit flags
f2e0899f 2457 *
e8920674 2458 * 57712 related
f2e0899f
DK
2459 */
2460static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2297a2da 2461 u32 *parsing_data, u32 xmit_type)
f2e0899f 2462{
e39aece7
VZ
2463 *parsing_data |=
2464 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2465 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2466 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
f2e0899f 2467
e39aece7
VZ
2468 if (xmit_type & XMIT_CSUM_TCP) {
2469 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2470 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2471 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
f2e0899f 2472
e39aece7
VZ
2473 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2474 } else
2475 /* We support checksum offload for TCP and UDP only.
2476 * No need to pass the UDP header length - it's a constant.
2477 */
2478 return skb_transport_header(skb) +
2479 sizeof(struct udphdr) - skb->data;
f2e0899f
DK
2480}
2481
93ef5c02
DK
2482static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2483 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2484{
93ef5c02
DK
2485 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2486
2487 if (xmit_type & XMIT_CSUM_V4)
2488 tx_start_bd->bd_flags.as_bitfield |=
2489 ETH_TX_BD_FLAGS_IP_CSUM;
2490 else
2491 tx_start_bd->bd_flags.as_bitfield |=
2492 ETH_TX_BD_FLAGS_IPV6;
2493
2494 if (!(xmit_type & XMIT_CSUM_TCP))
2495 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
93ef5c02
DK
2496}
2497
f2e0899f 2498/**
e8920674 2499 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
f2e0899f 2500 *
e8920674
DK
2501 * @bp: driver handle
2502 * @skb: packet skb
2503 * @pbd: parse BD to be updated
2504 * @xmit_type: xmit flags
f2e0899f
DK
2505 */
2506static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2507 struct eth_tx_parse_bd_e1x *pbd,
2508 u32 xmit_type)
2509{
e39aece7 2510 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
f2e0899f
DK
2511
2512 /* for now NS flag is not used in Linux */
2513 pbd->global_data =
2514 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2515 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2516
2517 pbd->ip_hlen_w = (skb_transport_header(skb) -
e39aece7 2518 skb_network_header(skb)) >> 1;
f2e0899f 2519
e39aece7
VZ
2520 hlen += pbd->ip_hlen_w;
2521
2522 /* We support checksum offload for TCP and UDP only */
2523 if (xmit_type & XMIT_CSUM_TCP)
2524 hlen += tcp_hdrlen(skb) / 2;
2525 else
2526 hlen += sizeof(struct udphdr) / 2;
f2e0899f
DK
2527
2528 pbd->total_hlen_w = cpu_to_le16(hlen);
2529 hlen = hlen*2;
2530
2531 if (xmit_type & XMIT_CSUM_TCP) {
2532 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2533
2534 } else {
2535 s8 fix = SKB_CS_OFF(skb); /* signed! */
2536
2537 DP(NETIF_MSG_TX_QUEUED,
2538 "hlen %d fix %d csum before fix %x\n",
2539 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2540
2541 /* HW bug: fixup the CSUM */
2542 pbd->tcp_pseudo_csum =
2543 bnx2x_csum_fix(skb_transport_header(skb),
2544 SKB_CS(skb), fix);
2545
2546 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2547 pbd->tcp_pseudo_csum);
2548 }
2549
2550 return hlen;
2551}
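/* Units above: ip_hlen_w and total_hlen_w are kept in 16-bit words (hence
 * the ">> 1" and "/ 2"), while the value returned to the caller is
 * converted back to bytes by "hlen = hlen*2".
 */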
f85582f8 2552
9f6c9258
DK
2553/* called with netif_tx_lock
2554 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2555 * netif_wake_queue()
2556 */
2557netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2558{
2559 struct bnx2x *bp = netdev_priv(dev);
6383c0b3 2560
9f6c9258
DK
2561 struct bnx2x_fastpath *fp;
2562 struct netdev_queue *txq;
6383c0b3 2563 struct bnx2x_fp_txdata *txdata;
9f6c9258 2564 struct sw_tx_bd *tx_buf;
619c5cb6 2565 struct eth_tx_start_bd *tx_start_bd, *first_bd;
9f6c9258 2566 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
523224a3 2567 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
f2e0899f 2568 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2297a2da 2569 u32 pbd_e2_parsing_data = 0;
9f6c9258 2570 u16 pkt_prod, bd_prod;
6383c0b3 2571 int nbd, txq_index, fp_index, txdata_index;
9f6c9258
DK
2572 dma_addr_t mapping;
2573 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2574 int i;
2575 u8 hlen = 0;
2576 __le16 pkt_size = 0;
2577 struct ethhdr *eth;
2578 u8 mac_type = UNICAST_ADDRESS;
2579
2580#ifdef BNX2X_STOP_ON_ERROR
2581 if (unlikely(bp->panic))
2582 return NETDEV_TX_BUSY;
2583#endif
2584
6383c0b3
AE
2585 txq_index = skb_get_queue_mapping(skb);
2586 txq = netdev_get_tx_queue(dev, txq_index);
2587
2588 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2589
2590 /* decode the fastpath index and the cos index from the txq */
2591 fp_index = TXQ_TO_FP(txq_index);
2592 txdata_index = TXQ_TO_COS(txq_index);
2593
2594#ifdef BCM_CNIC
2595 /*
2596 * Override the above for the FCoE queue:
2597 * - FCoE fp entry is right after the ETH entries.
2598 * - FCoE L2 queue uses bp->txdata[0] only.
2599 */
2600 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2601 bnx2x_fcoe_tx(bp, txq_index)))) {
2602 fp_index = FCOE_IDX;
2603 txdata_index = 0;
2604 }
2605#endif
2606
2607 /* enable this debug print to view the transmission queue being used
94f05b0f 2608 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
6383c0b3 2609 txq_index, fp_index, txdata_index); */
9f6c9258 2610
6383c0b3 2611 /* locate the fastpath and the txdata */
9f6c9258 2612 fp = &bp->fp[fp_index];
6383c0b3
AE
2613 txdata = &fp->txdata[txdata_index];
2614
 2615	/* enable this debug print to view the transmission details
2616 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
94f05b0f 2617 " tx_data ptr %p fp pointer %p\n",
6383c0b3 2618 txdata->cid, fp_index, txdata_index, txdata, fp); */
9f6c9258 2619
6383c0b3
AE
2620 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2621 (skb_shinfo(skb)->nr_frags + 3))) {
9f6c9258
DK
2622 fp->eth_q_stats.driver_xoff++;
2623 netif_tx_stop_queue(txq);
2624 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2625 return NETDEV_TX_BUSY;
2626 }
2627
f2e0899f
DK
2628 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2629 "protocol(%x,%x) gso type %x xmit_type %x\n",
6383c0b3 2630 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9f6c9258
DK
2631 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2632
2633 eth = (struct ethhdr *)skb->data;
2634
2635 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2636 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2637 if (is_broadcast_ether_addr(eth->h_dest))
2638 mac_type = BROADCAST_ADDRESS;
2639 else
2640 mac_type = MULTICAST_ADDRESS;
2641 }
2642
2643#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2644 /* First, check if we need to linearize the skb (due to FW
2645 restrictions). No need to check fragmentation if page size > 8K
 2646	   (there will be no violation of FW restrictions) */
2647 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2648 /* Statistics of linearization */
2649 bp->lin_cnt++;
2650 if (skb_linearize(skb) != 0) {
2651 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2652 "silently dropping this SKB\n");
2653 dev_kfree_skb_any(skb);
2654 return NETDEV_TX_OK;
2655 }
2656 }
2657#endif
619c5cb6
VZ
2658 /* Map skb linear data for DMA */
2659 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2660 skb_headlen(skb), DMA_TO_DEVICE);
2661 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2662 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2663 "silently dropping this SKB\n");
2664 dev_kfree_skb_any(skb);
2665 return NETDEV_TX_OK;
2666 }
9f6c9258
DK
2667 /*
2668 Please read carefully. First we use one BD which we mark as start,
2669 then we have a parsing info BD (used for TSO or xsum),
2670 and only then we have the rest of the TSO BDs.
2671 (don't forget to mark the last one as last,
2672 and to unmap only AFTER you write to the BD ...)
 2673	   And above all, all PBD sizes are in words - NOT DWORDS!
2674 */
2675
619c5cb6
VZ
2676 /* get current pkt produced now - advance it just before sending packet
2677 * since mapping of pages may fail and cause packet to be dropped
2678 */
6383c0b3
AE
2679 pkt_prod = txdata->tx_pkt_prod;
2680 bd_prod = TX_BD(txdata->tx_bd_prod);
9f6c9258 2681
619c5cb6
VZ
2682 /* get a tx_buf and first BD
2683 * tx_start_bd may be changed during SPLIT,
2684 * but first_bd will always stay first
2685 */
6383c0b3
AE
2686 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2687 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
619c5cb6 2688 first_bd = tx_start_bd;
9f6c9258
DK
2689
2690 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
f85582f8
DK
2691 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2692 mac_type);
2693
9f6c9258 2694 /* header nbd */
f85582f8 2695 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
9f6c9258
DK
2696
2697 /* remember the first BD of the packet */
6383c0b3 2698 tx_buf->first_bd = txdata->tx_bd_prod;
9f6c9258
DK
2699 tx_buf->skb = skb;
2700 tx_buf->flags = 0;
2701
2702 DP(NETIF_MSG_TX_QUEUED,
2703 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6383c0b3 2704 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
9f6c9258 2705
eab6d18d 2706 if (vlan_tx_tag_present(skb)) {
523224a3
DK
2707 tx_start_bd->vlan_or_ethertype =
2708 cpu_to_le16(vlan_tx_tag_get(skb));
2709 tx_start_bd->bd_flags.as_bitfield |=
2710 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
9f6c9258 2711 } else
523224a3 2712 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
9f6c9258
DK
2713
2714 /* turn on parsing and get a BD */
2715 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9f6c9258 2716
93ef5c02
DK
2717 if (xmit_type & XMIT_CSUM)
2718 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
9f6c9258 2719
619c5cb6 2720 if (!CHIP_IS_E1x(bp)) {
6383c0b3 2721 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
f2e0899f
DK
2722 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2723 /* Set PBD in checksum offload case */
2724 if (xmit_type & XMIT_CSUM)
2297a2da
VZ
2725 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2726 &pbd_e2_parsing_data,
2727 xmit_type);
619c5cb6
VZ
2728 if (IS_MF_SI(bp)) {
2729 /*
2730 * fill in the MAC addresses in the PBD - for local
2731 * switching
2732 */
2733 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2734 &pbd_e2->src_mac_addr_mid,
2735 &pbd_e2->src_mac_addr_lo,
2736 eth->h_source);
2737 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2738 &pbd_e2->dst_mac_addr_mid,
2739 &pbd_e2->dst_mac_addr_lo,
2740 eth->h_dest);
2741 }
f2e0899f 2742 } else {
6383c0b3 2743 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
f2e0899f
DK
2744 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2745 /* Set PBD in checksum offload case */
2746 if (xmit_type & XMIT_CSUM)
2747 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
9f6c9258 2748
9f6c9258
DK
2749 }
2750
f85582f8 2751 /* Setup the data pointer of the first BD of the packet */
9f6c9258
DK
2752 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2753 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
619c5cb6 2754 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
9f6c9258
DK
2755 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2756 pkt_size = tx_start_bd->nbytes;
2757
2758 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2759 " nbytes %d flags %x vlan %x\n",
2760 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2761 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
523224a3
DK
2762 tx_start_bd->bd_flags.as_bitfield,
2763 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
9f6c9258
DK
2764
2765 if (xmit_type & XMIT_GSO) {
2766
2767 DP(NETIF_MSG_TX_QUEUED,
2768 "TSO packet len %d hlen %d total len %d tso size %d\n",
2769 skb->len, hlen, skb_headlen(skb),
2770 skb_shinfo(skb)->gso_size);
2771
2772 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2773
2774 if (unlikely(skb_headlen(skb) > hlen))
6383c0b3
AE
2775 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2776 &tx_start_bd, hlen,
2777 bd_prod, ++nbd);
619c5cb6 2778 if (!CHIP_IS_E1x(bp))
2297a2da
VZ
2779 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2780 xmit_type);
f2e0899f
DK
2781 else
2782 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
9f6c9258 2783 }
2297a2da
VZ
2784
2785 /* Set the PBD's parsing_data field if not zero
2786 * (for the chips newer than 57711).
2787 */
2788 if (pbd_e2_parsing_data)
2789 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2790
9f6c9258
DK
2791 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2792
f85582f8 2793 /* Handle fragmented skb */
9f6c9258
DK
2794 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2795 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2796
9e903e08
ED
2797 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
2798 skb_frag_size(frag), DMA_TO_DEVICE);
619c5cb6
VZ
2799 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2800
2801 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2802 "dropping packet...\n");
2803
2804 /* we need unmap all buffers already mapped
2805 * for this SKB;
2806 * first_bd->nbd need to be properly updated
2807 * before call to bnx2x_free_tx_pkt
2808 */
2809 first_bd->nbd = cpu_to_le16(nbd);
6383c0b3
AE
2810 bnx2x_free_tx_pkt(bp, txdata,
2811 TX_BD(txdata->tx_pkt_prod));
619c5cb6
VZ
2812 return NETDEV_TX_OK;
2813 }
2814
9f6c9258 2815 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6383c0b3 2816 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 2817 if (total_pkt_bd == NULL)
6383c0b3 2818 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
9f6c9258 2819
9f6c9258
DK
2820 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2821 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9e903e08
ED
2822 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
2823 le16_add_cpu(&pkt_size, skb_frag_size(frag));
619c5cb6 2824 nbd++;
9f6c9258
DK
2825
2826 DP(NETIF_MSG_TX_QUEUED,
2827 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2828 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2829 le16_to_cpu(tx_data_bd->nbytes));
2830 }
2831
2832 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2833
619c5cb6
VZ
2834 /* update with actual num BDs */
2835 first_bd->nbd = cpu_to_le16(nbd);
2836
9f6c9258
DK
2837 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2838
2839 /* now send a tx doorbell, counting the next BD
2840 * if the packet contains or ends with it
2841 */
2842 if (TX_BD_POFF(bd_prod) < nbd)
2843 nbd++;
2844
619c5cb6
VZ
2845 /* total_pkt_bytes should be set on the first data BD if
2846 * it's not an LSO packet and there is more than one
2847 * data BD. In this case pkt_size is limited by an MTU value.
2848 * However we prefer to set it for an LSO packet (while we don't
 2849	 * have to) in order to save some CPU cycles in a non-LSO
 2850	 * case, when we care much more about them.
2851 */
9f6c9258
DK
2852 if (total_pkt_bd != NULL)
2853 total_pkt_bd->total_pkt_bytes = pkt_size;
2854
523224a3 2855 if (pbd_e1x)
9f6c9258 2856 DP(NETIF_MSG_TX_QUEUED,
523224a3 2857 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9f6c9258 2858 " tcp_flags %x xsum %x seq %u hlen %u\n",
523224a3
DK
2859 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2860 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2861 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2862 le16_to_cpu(pbd_e1x->total_hlen_w));
f2e0899f
DK
2863 if (pbd_e2)
2864 DP(NETIF_MSG_TX_QUEUED,
2865 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2866 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2867 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2868 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2869 pbd_e2->parsing_data);
9f6c9258
DK
2870 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2871
6383c0b3 2872 txdata->tx_pkt_prod++;
9f6c9258
DK
2873 /*
2874 * Make sure that the BD data is updated before updating the producer
2875 * since FW might read the BD right after the producer is updated.
2876 * This is only applicable for weak-ordered memory model archs such
 2877	 * as IA-64. The following barrier is also mandatory since FW
 2878	 * assumes packets must have BDs.
2879 */
2880 wmb();
2881
6383c0b3 2882 txdata->tx_db.data.prod += nbd;
9f6c9258 2883 barrier();
f85582f8 2884
6383c0b3 2885 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
9f6c9258
DK
2886
2887 mmiowb();
2888
6383c0b3 2889 txdata->tx_bd_prod += nbd;
9f6c9258 2890
6383c0b3 2891 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
9f6c9258
DK
2892 netif_tx_stop_queue(txq);
2893
2894 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2895 * ordering of set_bit() in netif_tx_stop_queue() and read of
2896 * fp->bd_tx_cons */
2897 smp_mb();
2898
2899 fp->eth_q_stats.driver_xoff++;
6383c0b3 2900 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
9f6c9258
DK
2901 netif_tx_wake_queue(txq);
2902 }
6383c0b3 2903 txdata->tx_pkt++;
9f6c9258
DK
2904
2905 return NETDEV_TX_OK;
2906}
f85582f8 2907
6383c0b3
AE
2908/**
2909 * bnx2x_setup_tc - routine to configure net_device for multi tc
2910 *
2911 * @netdev: net device to configure
2912 * @tc: number of traffic classes to enable
2913 *
2914 * callback connected to the ndo_setup_tc function pointer
2915 */
2916int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2917{
2918 int cos, prio, count, offset;
2919 struct bnx2x *bp = netdev_priv(dev);
2920
2921 /* setup tc must be called under rtnl lock */
2922 ASSERT_RTNL();
2923
2924 /* no traffic classes requested. aborting */
2925 if (!num_tc) {
2926 netdev_reset_tc(dev);
2927 return 0;
2928 }
2929
2930 /* requested to support too many traffic classes */
2931 if (num_tc > bp->max_cos) {
2932 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
94f05b0f 2933 " requested: %d. max supported is %d\n",
6383c0b3
AE
2934 num_tc, bp->max_cos);
2935 return -EINVAL;
2936 }
2937
2938 /* declare amount of supported traffic classes */
2939 if (netdev_set_num_tc(dev, num_tc)) {
94f05b0f 2940 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
6383c0b3
AE
2941 num_tc);
2942 return -EINVAL;
2943 }
2944
2945 /* configure priority to traffic class mapping */
2946 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2947 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
94f05b0f 2948 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
6383c0b3
AE
2949 prio, bp->prio_to_cos[prio]);
2950 }
2951
2952
 2953	/* Use this configuration to differentiate tc0 from other COSes
 2954	   This can be used for ETS or PFC, and saves the effort of setting
 2955	   up a multi-class queue disc or negotiating DCBX with a switch
2956 netdev_set_prio_tc_map(dev, 0, 0);
94f05b0f 2957 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
6383c0b3
AE
2958 for (prio = 1; prio < 16; prio++) {
2959 netdev_set_prio_tc_map(dev, prio, 1);
94f05b0f 2960 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
6383c0b3
AE
2961 } */
2962
2963 /* configure traffic class to transmission queue mapping */
2964 for (cos = 0; cos < bp->max_cos; cos++) {
2965 count = BNX2X_NUM_ETH_QUEUES(bp);
2966 offset = cos * MAX_TXQS_PER_COS;
2967 netdev_set_tc_queue(dev, cos, count, offset);
94f05b0f 2968 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
6383c0b3
AE
2969 cos, offset, count);
2970 }
2971
2972 return 0;
2973}
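/* Illustrative mapping produced by the routine above (values are
 * hypothetical): with num_tc = 2 and BNX2X_NUM_ETH_QUEUES(bp) = 4, an skb
 * priority is first translated to a traffic class through
 * bp->prio_to_cos[], and traffic class i is then served by the four Tx
 * queues starting at offset i * MAX_TXQS_PER_COS.
 */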
2974
9f6c9258
DK
2975/* called with rtnl_lock */
2976int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2977{
2978 struct sockaddr *addr = p;
2979 struct bnx2x *bp = netdev_priv(dev);
619c5cb6 2980 int rc = 0;
9f6c9258
DK
2981
2982 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2983 return -EINVAL;
2984
619c5cb6
VZ
2985 if (netif_running(dev)) {
2986 rc = bnx2x_set_eth_mac(bp, false);
2987 if (rc)
2988 return rc;
2989 }
2990
9f6c9258 2991 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
619c5cb6 2992
523224a3 2993 if (netif_running(dev))
619c5cb6 2994 rc = bnx2x_set_eth_mac(bp, true);
9f6c9258 2995
619c5cb6 2996 return rc;
9f6c9258
DK
2997}
2998
b3b83c3f
DK
2999static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3000{
3001 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3002 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
6383c0b3 3003 u8 cos;
b3b83c3f
DK
3004
3005 /* Common */
3006#ifdef BCM_CNIC
3007 if (IS_FCOE_IDX(fp_index)) {
3008 memset(sb, 0, sizeof(union host_hc_status_block));
3009 fp->status_blk_mapping = 0;
3010
3011 } else {
3012#endif
3013 /* status blocks */
619c5cb6 3014 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3015 BNX2X_PCI_FREE(sb->e2_sb,
3016 bnx2x_fp(bp, fp_index,
3017 status_blk_mapping),
3018 sizeof(struct host_hc_status_block_e2));
3019 else
3020 BNX2X_PCI_FREE(sb->e1x_sb,
3021 bnx2x_fp(bp, fp_index,
3022 status_blk_mapping),
3023 sizeof(struct host_hc_status_block_e1x));
3024#ifdef BCM_CNIC
3025 }
3026#endif
3027 /* Rx */
3028 if (!skip_rx_queue(bp, fp_index)) {
3029 bnx2x_free_rx_bds(fp);
3030
3031 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3032 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3033 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3034 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3035 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3036
3037 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3038 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3039 sizeof(struct eth_fast_path_rx_cqe) *
3040 NUM_RCQ_BD);
3041
3042 /* SGE ring */
3043 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3044 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3045 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3046 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3047 }
3048
3049 /* Tx */
3050 if (!skip_tx_queue(bp, fp_index)) {
3051 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3
AE
3052 for_each_cos_in_tx_queue(fp, cos) {
3053 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3054
3055 DP(BNX2X_MSG_SP,
94f05b0f 3056 "freeing tx memory of fp %d cos %d cid %d\n",
6383c0b3
AE
3057 fp_index, cos, txdata->cid);
3058
3059 BNX2X_FREE(txdata->tx_buf_ring);
3060 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3061 txdata->tx_desc_mapping,
3062 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3063 }
b3b83c3f
DK
3064 }
3065 /* end of fastpath */
3066}
3067
3068void bnx2x_free_fp_mem(struct bnx2x *bp)
3069{
3070 int i;
3071 for_each_queue(bp, i)
3072 bnx2x_free_fp_mem_at(bp, i);
3073}
3074
3075static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3076{
3077 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
619c5cb6 3078 if (!CHIP_IS_E1x(bp)) {
b3b83c3f
DK
3079 bnx2x_fp(bp, index, sb_index_values) =
3080 (__le16 *)status_blk.e2_sb->sb.index_values;
3081 bnx2x_fp(bp, index, sb_running_index) =
3082 (__le16 *)status_blk.e2_sb->sb.running_index;
3083 } else {
3084 bnx2x_fp(bp, index, sb_index_values) =
3085 (__le16 *)status_blk.e1x_sb->sb.index_values;
3086 bnx2x_fp(bp, index, sb_running_index) =
3087 (__le16 *)status_blk.e1x_sb->sb.running_index;
3088 }
3089}
3090
3091static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3092{
3093 union host_hc_status_block *sb;
3094 struct bnx2x_fastpath *fp = &bp->fp[index];
3095 int ring_size = 0;
6383c0b3 3096 u8 cos;
c2188952 3097 int rx_ring_size = 0;
b3b83c3f
DK
3098
3099 /* if rx_ring_size specified - use it */
c2188952 3100 if (!bp->rx_ring_size) {
b3b83c3f 3101
c2188952
VZ
3102 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3103
3104 /* allocate at least number of buffers required by FW */
3105 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
3106 MIN_RX_SIZE_TPA, rx_ring_size);
3107
3108 bp->rx_ring_size = rx_ring_size;
3109 } else
3110 rx_ring_size = bp->rx_ring_size;
b3b83c3f 3111
b3b83c3f
DK
3112 /* Common */
3113 sb = &bnx2x_fp(bp, index, status_blk);
3114#ifdef BCM_CNIC
3115 if (!IS_FCOE_IDX(index)) {
3116#endif
3117 /* status blocks */
619c5cb6 3118 if (!CHIP_IS_E1x(bp))
b3b83c3f
DK
3119 BNX2X_PCI_ALLOC(sb->e2_sb,
3120 &bnx2x_fp(bp, index, status_blk_mapping),
3121 sizeof(struct host_hc_status_block_e2));
3122 else
3123 BNX2X_PCI_ALLOC(sb->e1x_sb,
3124 &bnx2x_fp(bp, index, status_blk_mapping),
3125 sizeof(struct host_hc_status_block_e1x));
3126#ifdef BCM_CNIC
3127 }
3128#endif
8eef2af1
DK
3129
3130 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3131 * set shortcuts for it.
3132 */
3133 if (!IS_FCOE_IDX(index))
3134 set_sb_shortcuts(bp, index);
b3b83c3f
DK
3135
3136 /* Tx */
3137 if (!skip_tx_queue(bp, index)) {
3138 /* fastpath tx rings: tx_buf tx_desc */
6383c0b3
AE
3139 for_each_cos_in_tx_queue(fp, cos) {
3140 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3141
3142 DP(BNX2X_MSG_SP, "allocating tx memory of "
94f05b0f 3143 "fp %d cos %d\n",
6383c0b3
AE
3144 index, cos);
3145
3146 BNX2X_ALLOC(txdata->tx_buf_ring,
b3b83c3f 3147 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6383c0b3
AE
3148 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3149 &txdata->tx_desc_mapping,
b3b83c3f 3150 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
6383c0b3 3151 }
b3b83c3f
DK
3152 }
3153
3154 /* Rx */
3155 if (!skip_rx_queue(bp, index)) {
3156 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3157 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3158 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3159 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3160 &bnx2x_fp(bp, index, rx_desc_mapping),
3161 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3162
3163 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3164 &bnx2x_fp(bp, index, rx_comp_mapping),
3165 sizeof(struct eth_fast_path_rx_cqe) *
3166 NUM_RCQ_BD);
3167
3168 /* SGE ring */
3169 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3170 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3171 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3172 &bnx2x_fp(bp, index, rx_sge_mapping),
3173 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3174 /* RX BD ring */
3175 bnx2x_set_next_page_rx_bd(fp);
3176
3177 /* CQ ring */
3178 bnx2x_set_next_page_rx_cq(fp);
3179
3180 /* BDs */
3181 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3182 if (ring_size < rx_ring_size)
3183 goto alloc_mem_err;
3184 }
3185
3186 return 0;
3187
3188/* handles low memory cases */
3189alloc_mem_err:
3190 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3191 index, ring_size);
3192 /* FW will drop all packets if queue is not big enough,
3193 * In these cases we disable the queue
6383c0b3 3194 * Min size is different for OOO, TPA and non-TPA queues
b3b83c3f
DK
3195 */
3196 if (ring_size < (fp->disable_tpa ?
eb722d7a 3197 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
b3b83c3f
DK
3198 /* release memory allocated for this queue */
3199 bnx2x_free_fp_mem_at(bp, index);
3200 return -ENOMEM;
3201 }
3202 return 0;
3203}
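/* Example of the fallback above: if only a fraction of the requested Rx
 * buffers could be allocated and that count is below the TPA/non-TPA
 * minimum, the queue's memory is released and -ENOMEM is returned;
 * bnx2x_alloc_fp_mem() below treats that as fatal for the leading and
 * FCoE queues but merely shrinks bp->num_queues for the RSS queues.
 */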
3204
3205int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3206{
3207 int i;
3208
3209 /**
3210 * 1. Allocate FP for leading - fatal if error
3211 * 2. {CNIC} Allocate FCoE FP - fatal if error
6383c0b3
AE
3212 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3213 * 4. Allocate RSS - fix number of queues if error
b3b83c3f
DK
3214 */
3215
3216 /* leading */
3217 if (bnx2x_alloc_fp_mem_at(bp, 0))
3218 return -ENOMEM;
6383c0b3 3219
b3b83c3f 3220#ifdef BCM_CNIC
8eef2af1
DK
3221 if (!NO_FCOE(bp))
3222 /* FCoE */
3223 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
3224 /* we will fail load process instead of mark
3225 * NO_FCOE_FLAG
3226 */
3227 return -ENOMEM;
b3b83c3f 3228#endif
6383c0b3 3229
b3b83c3f
DK
3230 /* RSS */
3231 for_each_nondefault_eth_queue(bp, i)
3232 if (bnx2x_alloc_fp_mem_at(bp, i))
3233 break;
3234
3235 /* handle memory failures */
3236 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3237 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3238
3239 WARN_ON(delta < 0);
3240#ifdef BCM_CNIC
3241 /**
3242 * move non eth FPs next to last eth FP
3243 * must be done in that order
3244 * FCOE_IDX < FWD_IDX < OOO_IDX
3245 */
3246
6383c0b3 3247		/* move FCoE fp even if NO_FCOE_FLAG is on */
b3b83c3f
DK
3248 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3249#endif
3250 bp->num_queues -= delta;
3251 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3252 bp->num_queues + delta, bp->num_queues);
3253 }
3254
3255 return 0;
3256}
d6214d7a 3257
523224a3
DK
3258void bnx2x_free_mem_bp(struct bnx2x *bp)
3259{
3260 kfree(bp->fp);
3261 kfree(bp->msix_table);
3262 kfree(bp->ilt);
3263}
3264
3265int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3266{
3267 struct bnx2x_fastpath *fp;
3268 struct msix_entry *tbl;
3269 struct bnx2x_ilt *ilt;
6383c0b3
AE
3270 int msix_table_size = 0;
3271
3272 /*
 3273	 * The biggest MSI-X table we might need is the maximum number of fast
3274 * path IGU SBs plus default SB (for PF).
3275 */
3276 msix_table_size = bp->igu_sb_cnt + 1;
523224a3 3277
6383c0b3
AE
3278 /* fp array: RSS plus CNIC related L2 queues */
3279 fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
3280 sizeof(*fp), GFP_KERNEL);
523224a3
DK
3281 if (!fp)
3282 goto alloc_err;
3283 bp->fp = fp;
3284
3285 /* msix table */
6383c0b3 3286 tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
523224a3
DK
3287 if (!tbl)
3288 goto alloc_err;
3289 bp->msix_table = tbl;
3290
3291 /* ilt */
3292 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3293 if (!ilt)
3294 goto alloc_err;
3295 bp->ilt = ilt;
3296
3297 return 0;
3298alloc_err:
3299 bnx2x_free_mem_bp(bp);
3300 return -ENOMEM;
3301
3302}
3303
a9fccec7 3304int bnx2x_reload_if_running(struct net_device *dev)
66371c44
MM
3305{
3306 struct bnx2x *bp = netdev_priv(dev);
3307
3308 if (unlikely(!netif_running(dev)))
3309 return 0;
3310
3311 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3312 return bnx2x_nic_load(bp, LOAD_NORMAL);
3313}
3314
1ac9e428
YR
3315int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3316{
3317 u32 sel_phy_idx = 0;
3318 if (bp->link_params.num_phys <= 1)
3319 return INT_PHY;
3320
3321 if (bp->link_vars.link_up) {
3322 sel_phy_idx = EXT_PHY1;
3323 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3324 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3325 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3326 sel_phy_idx = EXT_PHY2;
3327 } else {
3328
3329 switch (bnx2x_phy_selection(&bp->link_params)) {
3330 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3331 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3332 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3333 sel_phy_idx = EXT_PHY1;
3334 break;
3335 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3336 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3337 sel_phy_idx = EXT_PHY2;
3338 break;
3339 }
3340 }
3341
3342 return sel_phy_idx;
3343
3344}
3345int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3346{
3347 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3348 /*
 3349	 * The selected active PHY is always the one after swapping (in case PHY
 3350	 * swapping is enabled). So when swapping is enabled, we need to reverse
 3351	 * the configuration.
3352 */
3353
3354 if (bp->link_params.multi_phy_config &
3355 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3356 if (sel_phy_idx == EXT_PHY1)
3357 sel_phy_idx = EXT_PHY2;
3358 else if (sel_phy_idx == EXT_PHY2)
3359 sel_phy_idx = EXT_PHY1;
3360 }
3361 return LINK_CONFIG_IDX(sel_phy_idx);
3362}
3363
bf61ee14
VZ
3364#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3365int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3366{
3367 struct bnx2x *bp = netdev_priv(dev);
3368 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3369
3370 switch (type) {
3371 case NETDEV_FCOE_WWNN:
3372 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3373 cp->fcoe_wwn_node_name_lo);
3374 break;
3375 case NETDEV_FCOE_WWPN:
3376 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3377 cp->fcoe_wwn_port_name_lo);
3378 break;
3379 default:
3380 return -EINVAL;
3381 }
3382
3383 return 0;
3384}
3385#endif
3386
9f6c9258
DK
3387/* called with rtnl_lock */
3388int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3389{
3390 struct bnx2x *bp = netdev_priv(dev);
9f6c9258
DK
3391
3392 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
f1deab50 3393 pr_err("Handling parity error recovery. Try again later\n");
9f6c9258
DK
3394 return -EAGAIN;
3395 }
3396
3397 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3398 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3399 return -EINVAL;
3400
3401 /* This does not race with packet allocation
3402 * because the actual alloc size is
3403 * only updated as part of load
3404 */
3405 dev->mtu = new_mtu;
3406
66371c44
MM
3407 return bnx2x_reload_if_running(dev);
3408}
3409
3410u32 bnx2x_fix_features(struct net_device *dev, u32 features)
3411{
3412 struct bnx2x *bp = netdev_priv(dev);
3413
3414 /* TPA requires Rx CSUM offloading */
3415 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3416 features &= ~NETIF_F_LRO;
3417
3418 return features;
3419}
3420
3421int bnx2x_set_features(struct net_device *dev, u32 features)
3422{
3423 struct bnx2x *bp = netdev_priv(dev);
3424 u32 flags = bp->flags;
538dd2e3 3425 bool bnx2x_reload = false;
66371c44
MM
3426
3427 if (features & NETIF_F_LRO)
3428 flags |= TPA_ENABLE_FLAG;
3429 else
3430 flags &= ~TPA_ENABLE_FLAG;
3431
538dd2e3
MB
3432 if (features & NETIF_F_LOOPBACK) {
3433 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3434 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3435 bnx2x_reload = true;
3436 }
3437 } else {
3438 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3439 bp->link_params.loopback_mode = LOOPBACK_NONE;
3440 bnx2x_reload = true;
3441 }
3442 }
3443
66371c44
MM
3444 if (flags ^ bp->flags) {
3445 bp->flags = flags;
538dd2e3
MB
3446 bnx2x_reload = true;
3447 }
66371c44 3448
538dd2e3 3449 if (bnx2x_reload) {
66371c44
MM
3450 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3451 return bnx2x_reload_if_running(dev);
3452 /* else: bnx2x_nic_load() will be called at end of recovery */
9f6c9258
DK
3453 }
3454
66371c44 3455 return 0;
9f6c9258
DK
3456}
3457
3458void bnx2x_tx_timeout(struct net_device *dev)
3459{
3460 struct bnx2x *bp = netdev_priv(dev);
3461
3462#ifdef BNX2X_STOP_ON_ERROR
3463 if (!bp->panic)
3464 bnx2x_panic();
3465#endif
7be08a72
AE
3466
3467 smp_mb__before_clear_bit();
3468 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3469 smp_mb__after_clear_bit();
3470
9f6c9258 3471 /* This allows the netif to be shutdown gracefully before resetting */
7be08a72 3472 schedule_delayed_work(&bp->sp_rtnl_task, 0);
9f6c9258
DK
3473}
3474
9f6c9258
DK
3475int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3476{
3477 struct net_device *dev = pci_get_drvdata(pdev);
3478 struct bnx2x *bp;
3479
3480 if (!dev) {
3481 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3482 return -ENODEV;
3483 }
3484 bp = netdev_priv(dev);
3485
3486 rtnl_lock();
3487
3488 pci_save_state(pdev);
3489
3490 if (!netif_running(dev)) {
3491 rtnl_unlock();
3492 return 0;
3493 }
3494
3495 netif_device_detach(dev);
3496
3497 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3498
3499 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3500
3501 rtnl_unlock();
3502
3503 return 0;
3504}
3505
3506int bnx2x_resume(struct pci_dev *pdev)
3507{
3508 struct net_device *dev = pci_get_drvdata(pdev);
3509 struct bnx2x *bp;
3510 int rc;
3511
3512 if (!dev) {
3513 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3514 return -ENODEV;
3515 }
3516 bp = netdev_priv(dev);
3517
3518 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
f1deab50 3519 pr_err("Handling parity error recovery. Try again later\n");
9f6c9258
DK
3520 return -EAGAIN;
3521 }
3522
3523 rtnl_lock();
3524
3525 pci_restore_state(pdev);
3526
3527 if (!netif_running(dev)) {
3528 rtnl_unlock();
3529 return 0;
3530 }
3531
3532 bnx2x_set_power_state(bp, PCI_D0);
3533 netif_device_attach(dev);
3534
f2e0899f
DK
3535 /* Since the chip was reset, clear the FW sequence number */
3536 bp->fw_seq = 0;
9f6c9258
DK
3537 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3538
3539 rtnl_unlock();
3540
3541 return rc;
3542}
619c5cb6
VZ
3543
3544
3545void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3546 u32 cid)
3547{
3548 /* ustorm cxt validation */
3549 cxt->ustorm_ag_context.cdu_usage =
3550 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3551 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3552 /* xcontext validation */
3553 cxt->xstorm_ag_context.cdu_reserved =
3554 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3555 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3556}
3557
3558static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3559 u8 fw_sb_id, u8 sb_index,
3560 u8 ticks)
3561{
3562
3563 u32 addr = BAR_CSTRORM_INTMEM +
3564 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3565 REG_WR8(bp, addr, ticks);
3566 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3567 port, fw_sb_id, sb_index, ticks);
3568}
3569
3570static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3571 u16 fw_sb_id, u8 sb_index,
3572 u8 disable)
3573{
3574 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3575 u32 addr = BAR_CSTRORM_INTMEM +
3576 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3577 u16 flags = REG_RD16(bp, addr);
3578 /* clear and set */
3579 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3580 flags |= enable_flag;
3581 REG_WR16(bp, addr, flags);
3582 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3583 port, fw_sb_id, sb_index, disable);
3584}
3585
3586void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3587 u8 sb_index, u8 disable, u16 usec)
3588{
3589 int port = BP_PORT(bp);
3590 u8 ticks = usec / BNX2X_BTR;
3591
3592 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3593
3594 disable = disable ? 1 : (usec ? 0 : 1);
3595 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3596}