bnx2x: use proper constants for dma_unmap* calls
drivers/net/bnx2x/bnx2x_cmn.c
1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18
19#include <linux/etherdevice.h>
20#include <linux/ip.h>
21#include <net/ipv6.h>
22#include <net/ip6_checksum.h>
23#include <linux/firmware.h>
24#include "bnx2x_cmn.h"
25
26#ifdef BCM_VLAN
27#include <linux/if_vlan.h>
28#endif
29
30#include "bnx2x_init.h"
31
32static int bnx2x_poll(struct napi_struct *napi, int budget);
33
34/* free skb in the packet ring at pos idx
35 * return idx of last bd freed
36 */
37static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
38 u16 idx)
39{
40 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
41 struct eth_tx_start_bd *tx_start_bd;
42 struct eth_tx_bd *tx_data_bd;
43 struct sk_buff *skb = tx_buf->skb;
44 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
45 int nbd;
46
47 /* prefetch skb end pointer to speed up dev_kfree_skb() */
48 prefetch(&skb->end);
49
50 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
51 idx, tx_buf, skb);
52
53 /* unmap first bd */
54 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
55 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
56 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
57 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
58
59 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
60#ifdef BNX2X_STOP_ON_ERROR
61 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
62 BNX2X_ERR("BAD nbd!\n");
63 bnx2x_panic();
64 }
65#endif
66 new_cons = nbd + tx_buf->first_bd;
67
68 /* Get the next bd */
69 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
70
71 /* Skip a parse bd... */
72 --nbd;
73 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
74
75 /* ...and the TSO split header bd since they have no mapping */
76 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
77 --nbd;
78 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
79 }
80
81 /* now free frags */
82 while (nbd > 0) {
83
84 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
85 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
86 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
87 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
88 if (--nbd)
89 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
90 }
91
92 /* release skb */
93 WARN_ON(!skb);
94 dev_kfree_skb(skb);
95 tx_buf->first_bd = 0;
96 tx_buf->skb = NULL;
97
98 return new_cons;
99}
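/* Note on the unmap calls above: BD_UNMAP_ADDR()/BD_UNMAP_LEN() rebuild the
 * DMA address and length from the fields stored in the descriptor itself, so
 * no shadow copy of the mapping is needed.  Roughly (the real macros live in
 * the driver headers and may differ in detail):
 *
 *	#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr_hi), \
 *					   le32_to_cpu((bd)->addr_lo))
 *	#define BD_UNMAP_LEN(bd)  (le16_to_cpu((bd)->nbytes))
 */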
100
101int bnx2x_tx_int(struct bnx2x_fastpath *fp)
102{
103 struct bnx2x *bp = fp->bp;
104 struct netdev_queue *txq;
105 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
106
107#ifdef BNX2X_STOP_ON_ERROR
108 if (unlikely(bp->panic))
109 return -1;
110#endif
111
112 txq = netdev_get_tx_queue(bp->dev, fp->index);
113 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
114 sw_cons = fp->tx_pkt_cons;
115
116 while (sw_cons != hw_cons) {
117 u16 pkt_cons;
118
119 pkt_cons = TX_BD(sw_cons);
120
121 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
122 " pkt_cons %u\n",
123 fp->index, hw_cons, sw_cons, pkt_cons);
124
125 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
126 sw_cons++;
127 }
128
129 fp->tx_pkt_cons = sw_cons;
130 fp->tx_bd_cons = bd_cons;
131
132 /* Need to make the tx_bd_cons update visible to start_xmit()
133 * before checking for netif_tx_queue_stopped(). Without the
134 * memory barrier, there is a small possibility that
135 * start_xmit() will miss it and cause the queue to be stopped
136 * forever.
137 */
138 smp_mb();
139
140 /* TBD need a thresh? */
141 if (unlikely(netif_tx_queue_stopped(txq))) {
142 /* Taking tx_lock() is needed to prevent re-enabling the queue
143 * while it's empty. This could happen if rx_action() gets
144 * suspended in bnx2x_tx_int() after the condition before
145 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
146 *
147 * stops the queue->sees fresh tx_bd_cons->releases the queue->
148 * sends some packets consuming the whole queue again->
149 * stops the queue
150 */
151
152 __netif_tx_lock(txq, smp_processor_id());
153
154 if ((netif_tx_queue_stopped(txq)) &&
155 (bp->state == BNX2X_STATE_OPEN) &&
156 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
157 netif_tx_wake_queue(txq);
158
159 __netif_tx_unlock(txq);
160 }
161 return 0;
162}
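/* The smp_mb() in bnx2x_tx_int() pairs with a barrier on the transmit side:
 * bnx2x_start_xmit() stops the queue first and only then re-checks the ring,
 * roughly (illustrative sketch; the exact thresholds live in bnx2x_start_xmit()):
 *
 *	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
 *		netif_tx_stop_queue(txq);
 *		smp_mb();
 *		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
 *			netif_tx_wake_queue(txq);
 *	}
 */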
163
164static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
165 u16 idx)
166{
167 u16 last_max = fp->last_max_sge;
168
169 if (SUB_S16(idx, last_max) > 0)
170 fp->last_max_sge = idx;
171}
172
173static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
174 struct eth_fast_path_rx_cqe *fp_cqe)
175{
176 struct bnx2x *bp = fp->bp;
177 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
178 le16_to_cpu(fp_cqe->len_on_bd)) >>
179 SGE_PAGE_SHIFT;
180 u16 last_max, last_elem, first_elem;
181 u16 delta = 0;
182 u16 i;
183
184 if (!sge_len)
185 return;
186
187 /* First mark all used pages */
188 for (i = 0; i < sge_len; i++)
189 SGE_MASK_CLEAR_BIT(fp,
190 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
191
192 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
193 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
194
195 /* Here we assume that the last SGE index is the biggest */
196 prefetch((void *)(fp->sge_mask));
197 bnx2x_update_last_max_sge(fp,
198 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
199
200 last_max = RX_SGE(fp->last_max_sge);
201 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
202 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
203
204 /* If ring is not full */
205 if (last_elem + 1 != first_elem)
206 last_elem++;
207
208 /* Now update the prod */
209 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
210 if (likely(fp->sge_mask[i]))
211 break;
212
213 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
214 delta += RX_SGE_MASK_ELEM_SZ;
215 }
216
217 if (delta > 0) {
218 fp->rx_sge_prod += delta;
219 /* clear page-end entries */
220 bnx2x_clear_sge_mask_next_elems(fp);
221 }
222
223 DP(NETIF_MSG_RX_STATUS,
224 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
225 fp->last_max_sge, fp->rx_sge_prod);
226}
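/* For reference: sge_mask[] mirrors the SGE ring, one bit per entry.
 * SGE_MASK_CLEAR_BIT() above clears the bit of every page the FW consumed,
 * and rx_sge_prod is only advanced across mask words that have gone fully to
 * zero; each word is re-armed to all ones as the producer passes it. */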
227
228static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
229 struct sk_buff *skb, u16 cons, u16 prod)
230{
231 struct bnx2x *bp = fp->bp;
232 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
233 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
234 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
235 dma_addr_t mapping;
236
237 /* move empty skb from pool to prod and map it */
238 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
239 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
240 bp->rx_buf_size, DMA_FROM_DEVICE);
241 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
242
243 /* move partial skb from cons to pool (don't unmap yet) */
244 fp->tpa_pool[queue] = *cons_rx_buf;
245
246 /* mark bin state as start - print error if current state != stop */
247 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
248 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
249
250 fp->tpa_state[queue] = BNX2X_TPA_START;
251
252 /* point prod_bd to new skb */
253 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
254 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
255
256#ifdef BNX2X_STOP_ON_ERROR
257 fp->tpa_queue_used |= (1 << queue);
258#ifdef _ASM_GENERIC_INT_L64_H
259 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
260#else
261 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
262#endif
263 fp->tpa_queue_used);
264#endif
265}
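/* TPA bookkeeping: each aggregation queue ("bin") owns one spare skb in
 * tpa_pool[].  On TPA_START the spare skb is mapped into the producer slot so
 * the RX ring stays full, while the skb holding the first segment is parked in
 * the pool (still DMA mapped) until TPA_STOP attaches the SGE pages to it. */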
266
267static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
268 struct sk_buff *skb,
269 struct eth_fast_path_rx_cqe *fp_cqe,
270 u16 cqe_idx)
271{
272 struct sw_rx_page *rx_pg, old_rx_pg;
273 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
274 u32 i, frag_len, frag_size, pages;
275 int err;
276 int j;
277
278 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
279 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
280
281 /* This is needed in order to enable forwarding support */
282 if (frag_size)
283 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
284 max(frag_size, (u32)len_on_bd));
285
286#ifdef BNX2X_STOP_ON_ERROR
287 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
288 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
289 pages, cqe_idx);
290 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
291 fp_cqe->pkt_len, len_on_bd);
292 bnx2x_panic();
293 return -EINVAL;
294 }
295#endif
296
297 /* Run through the SGL and compose the fragmented skb */
298 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
299 u16 sge_idx =
300 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
301
302 /* FW gives the indices of the SGE as if the ring is an array
303 (meaning that "next" element will consume 2 indices) */
304 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
305 rx_pg = &fp->rx_page_ring[sge_idx];
306 old_rx_pg = *rx_pg;
307
308 /* If we fail to allocate a substitute page, we simply stop
309 where we are and drop the whole packet */
310 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
311 if (unlikely(err)) {
312 fp->eth_q_stats.rx_skb_alloc_failed++;
313 return err;
314 }
315
316 /* Unmap the page as we are going to pass it to the stack */
317 dma_unmap_page(&bp->pdev->dev,
318 dma_unmap_addr(&old_rx_pg, mapping),
319 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
320
321 /* Add one frag and update the appropriate fields in the skb */
322 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
323
324 skb->data_len += frag_len;
325 skb->truesize += frag_len;
326 skb->len += frag_len;
327
328 frag_size -= frag_len;
329 }
330
331 return 0;
332}
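/* Illustration of the SGL walk above (assumed page geometry): with
 * SGE_PAGE_SIZE * PAGES_PER_SGE == 4096 and an aggregated frag_size of 9000
 * bytes, the loop attaches frags of 4096, 4096 and 808 bytes, replacing each
 * donated page through bnx2x_alloc_rx_sge() before it is handed to the stack. */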
333
334static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
335 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
336 u16 cqe_idx)
337{
338 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
339 struct sk_buff *skb = rx_buf->skb;
340 /* alloc new skb */
341 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
342
343 /* Unmap skb in the pool anyway, as we are going to change
344 pool entry status to BNX2X_TPA_STOP even if new skb allocation
345 fails. */
346 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
347 bp->rx_buf_size, DMA_FROM_DEVICE);
348
349 if (likely(new_skb)) {
350 /* fix ip xsum and give it to the stack */
351 /* (no need to map the new skb) */
352#ifdef BCM_VLAN
353 int is_vlan_cqe =
354 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
355 PARSING_FLAGS_VLAN);
356 int is_not_hwaccel_vlan_cqe =
357 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
358#endif
359
360 prefetch(skb);
361 prefetch(((char *)(skb)) + 128);
362
363#ifdef BNX2X_STOP_ON_ERROR
364 if (pad + len > bp->rx_buf_size) {
365 BNX2X_ERR("skb_put is about to fail... "
366 "pad %d len %d rx_buf_size %d\n",
367 pad, len, bp->rx_buf_size);
368 bnx2x_panic();
369 return;
370 }
371#endif
372
373 skb_reserve(skb, pad);
374 skb_put(skb, len);
375
376 skb->protocol = eth_type_trans(skb, bp->dev);
377 skb->ip_summed = CHECKSUM_UNNECESSARY;
378
379 {
380 struct iphdr *iph;
381
382 iph = (struct iphdr *)skb->data;
383#ifdef BCM_VLAN
384 /* If there is no Rx VLAN offloading -
385 take VLAN tag into an account */
386 if (unlikely(is_not_hwaccel_vlan_cqe))
387 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
388#endif
389 iph->check = 0;
390 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
391 }
392
393 if (!bnx2x_fill_frag_skb(bp, fp, skb,
394 &cqe->fast_path_cqe, cqe_idx)) {
395#ifdef BCM_VLAN
396 if ((bp->vlgrp != NULL) &&
397 (le16_to_cpu(cqe->fast_path_cqe.
398 pars_flags.flags) & PARSING_FLAGS_VLAN))
399 vlan_gro_receive(&fp->napi, bp->vlgrp,
400 le16_to_cpu(cqe->fast_path_cqe.
401 vlan_tag), skb);
402 else
403#endif
404 napi_gro_receive(&fp->napi, skb);
405 } else {
406 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
407 " - dropping packet!\n");
408 dev_kfree_skb(skb);
409 }
410
411
412 /* put new skb in bin */
413 fp->tpa_pool[queue].skb = new_skb;
414
415 } else {
416 /* else drop the packet and keep the buffer in the bin */
417 DP(NETIF_MSG_RX_STATUS,
418 "Failed to allocate new skb - dropping packet!\n");
419 fp->eth_q_stats.rx_skb_alloc_failed++;
420 }
421
422 fp->tpa_state[queue] = BNX2X_TPA_STOP;
423}
424
425/* Set Toeplitz hash value in the skb using the value from the
426 * CQE (calculated by HW).
427 */
428static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
429 struct sk_buff *skb)
430{
431 /* Set Toeplitz hash from CQE */
432 if ((bp->dev->features & NETIF_F_RXHASH) &&
433 (cqe->fast_path_cqe.status_flags &
434 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
435 skb->rxhash =
436 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
437}
438
439int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
440{
441 struct bnx2x *bp = fp->bp;
442 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
443 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
444 int rx_pkt = 0;
445
446#ifdef BNX2X_STOP_ON_ERROR
447 if (unlikely(bp->panic))
448 return 0;
449#endif
450
451 /* CQ "next element" is of the size of the regular element,
452 that's why it's ok here */
453 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
454 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
455 hw_comp_cons++;
456
457 bd_cons = fp->rx_bd_cons;
458 bd_prod = fp->rx_bd_prod;
459 bd_prod_fw = bd_prod;
460 sw_comp_cons = fp->rx_comp_cons;
461 sw_comp_prod = fp->rx_comp_prod;
462
463 /* Memory barrier necessary as speculative reads of the rx
464 * buffer can be ahead of the index in the status block
465 */
466 rmb();
467
468 DP(NETIF_MSG_RX_STATUS,
469 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
470 fp->index, hw_comp_cons, sw_comp_cons);
471
472 while (sw_comp_cons != hw_comp_cons) {
473 struct sw_rx_bd *rx_buf = NULL;
474 struct sk_buff *skb;
475 union eth_rx_cqe *cqe;
476 u8 cqe_fp_flags;
477 u16 len, pad;
478
479 comp_ring_cons = RCQ_BD(sw_comp_cons);
480 bd_prod = RX_BD(bd_prod);
481 bd_cons = RX_BD(bd_cons);
482
483 /* Prefetch the page containing the BD descriptor
484 at producer's index. It will be needed when new skb is
485 allocated */
486 prefetch((void *)(PAGE_ALIGN((unsigned long)
487 (&fp->rx_desc_ring[bd_prod])) -
488 PAGE_SIZE + 1));
489
490 cqe = &fp->rx_comp_ring[comp_ring_cons];
491 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
492
493 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
494 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
495 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
496 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
497 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
498 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
499
500 /* is this a slowpath msg? */
501 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
502 bnx2x_sp_event(fp, cqe);
503 goto next_cqe;
504
505 /* this is an rx packet */
506 } else {
507 rx_buf = &fp->rx_buf_ring[bd_cons];
508 skb = rx_buf->skb;
509 prefetch(skb);
510 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
511 pad = cqe->fast_path_cqe.placement_offset;
512
513 /* If CQE is marked both TPA_START and TPA_END
514 it is a non-TPA CQE */
515 if ((!fp->disable_tpa) &&
516 (TPA_TYPE(cqe_fp_flags) !=
517 (TPA_TYPE_START | TPA_TYPE_END))) {
518 u16 queue = cqe->fast_path_cqe.queue_index;
519
520 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
521 DP(NETIF_MSG_RX_STATUS,
522 "calling tpa_start on queue %d\n",
523 queue);
524
525 bnx2x_tpa_start(fp, queue, skb,
526 bd_cons, bd_prod);
527
528 /* Set Toeplitz hash for an LRO skb */
529 bnx2x_set_skb_rxhash(bp, cqe, skb);
530
531 goto next_rx;
532 }
533
534 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
535 DP(NETIF_MSG_RX_STATUS,
536 "calling tpa_stop on queue %d\n",
537 queue);
538
539 if (!BNX2X_RX_SUM_FIX(cqe))
540 BNX2X_ERR("STOP on none TCP "
541 "data\n");
542
543 /* This is a size of the linear data
544 on this skb */
545 len = le16_to_cpu(cqe->fast_path_cqe.
546 len_on_bd);
547 bnx2x_tpa_stop(bp, fp, queue, pad,
548 len, cqe, comp_ring_cons);
549#ifdef BNX2X_STOP_ON_ERROR
550 if (bp->panic)
551 return 0;
552#endif
553
554 bnx2x_update_sge_prod(fp,
555 &cqe->fast_path_cqe);
556 goto next_cqe;
557 }
558 }
559
560 dma_sync_single_for_device(&bp->pdev->dev,
561 dma_unmap_addr(rx_buf, mapping),
562 pad + RX_COPY_THRESH,
563 DMA_FROM_DEVICE);
564 prefetch(((char *)(skb)) + 128);
565
566 /* is this an error packet? */
567 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
568 DP(NETIF_MSG_RX_ERR,
569 "ERROR flags %x rx packet %u\n",
570 cqe_fp_flags, sw_comp_cons);
571 fp->eth_q_stats.rx_err_discard_pkt++;
572 goto reuse_rx;
573 }
574
575 /* Since we don't have a jumbo ring
576 * copy small packets if mtu > 1500
577 */
578 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
579 (len <= RX_COPY_THRESH)) {
580 struct sk_buff *new_skb;
581
582 new_skb = netdev_alloc_skb(bp->dev,
583 len + pad);
584 if (new_skb == NULL) {
585 DP(NETIF_MSG_RX_ERR,
586 "ERROR packet dropped "
587 "because of alloc failure\n");
588 fp->eth_q_stats.rx_skb_alloc_failed++;
589 goto reuse_rx;
590 }
591
592 /* aligned copy */
593 skb_copy_from_linear_data_offset(skb, pad,
594 new_skb->data + pad, len);
595 skb_reserve(new_skb, pad);
596 skb_put(new_skb, len);
597
598 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
599
600 skb = new_skb;
601
602 } else
603 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
604 dma_unmap_single(&bp->pdev->dev,
605 dma_unmap_addr(rx_buf, mapping),
606 bp->rx_buf_size,
607 DMA_FROM_DEVICE);
608 skb_reserve(skb, pad);
609 skb_put(skb, len);
610
611 } else {
612 DP(NETIF_MSG_RX_ERR,
613 "ERROR packet dropped because "
614 "of alloc failure\n");
615 fp->eth_q_stats.rx_skb_alloc_failed++;
616reuse_rx:
617 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
618 goto next_rx;
619 }
620
621 skb->protocol = eth_type_trans(skb, bp->dev);
622
623 /* Set Toeplitz hash for a non-LRO skb */
624 bnx2x_set_skb_rxhash(bp, cqe, skb);
625
626 skb_checksum_none_assert(skb);
627 if (bp->rx_csum) {
628 if (likely(BNX2X_RX_CSUM_OK(cqe)))
629 skb->ip_summed = CHECKSUM_UNNECESSARY;
630 else
631 fp->eth_q_stats.hw_csum_err++;
632 }
633 }
634
635 skb_record_rx_queue(skb, fp->index);
636
637#ifdef BCM_VLAN
638 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
639 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
640 PARSING_FLAGS_VLAN))
641 vlan_gro_receive(&fp->napi, bp->vlgrp,
642 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
643 else
644#endif
645 napi_gro_receive(&fp->napi, skb);
646
647
648next_rx:
649 rx_buf->skb = NULL;
650
651 bd_cons = NEXT_RX_IDX(bd_cons);
652 bd_prod = NEXT_RX_IDX(bd_prod);
653 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
654 rx_pkt++;
655next_cqe:
656 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
657 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
658
659 if (rx_pkt == budget)
660 break;
661 } /* while */
662
663 fp->rx_bd_cons = bd_cons;
664 fp->rx_bd_prod = bd_prod_fw;
665 fp->rx_comp_cons = sw_comp_cons;
666 fp->rx_comp_prod = sw_comp_prod;
667
668 /* Update producers */
669 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
670 fp->rx_sge_prod);
671
672 fp->rx_pkt += rx_pkt;
673 fp->rx_calls++;
674
675 return rx_pkt;
676}
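/* Per-CQE decision summary for bnx2x_rx_int(): slow-path CQEs are routed to
 * bnx2x_sp_event(); TPA start/stop CQEs go through the tpa_* helpers above;
 * any other CQE is a regular packet that is either copied into a fresh skb
 * (small frame with a jumbo MTU), unmapped and passed up after a replacement
 * skb is allocated, or recycled via bnx2x_reuse_rx_skb() on alloc failure. */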
677
678static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
679{
680 struct bnx2x_fastpath *fp = fp_cookie;
681 struct bnx2x *bp = fp->bp;
682
683 /* Return here if interrupt is disabled */
684 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
685 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
686 return IRQ_HANDLED;
687 }
688
689 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
690 "[fp %d fw_sd %d igusb %d]\n",
691 fp->index, fp->fw_sb_id, fp->igu_sb_id);
692 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
693
694#ifdef BNX2X_STOP_ON_ERROR
695 if (unlikely(bp->panic))
696 return IRQ_HANDLED;
697#endif
698
699 /* Handle Rx and Tx according to MSI-X vector */
700 prefetch(fp->rx_cons_sb);
701 prefetch(fp->tx_cons_sb);
702 prefetch(&fp->sb_running_index[SM_RX_ID]);
703 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
704
705 return IRQ_HANDLED;
706}
707
708
709/* HW Lock for shared dual port PHYs */
710void bnx2x_acquire_phy_lock(struct bnx2x *bp)
711{
712 mutex_lock(&bp->port.phy_mutex);
713
714 if (bp->port.need_hw_lock)
715 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
716}
717
718void bnx2x_release_phy_lock(struct bnx2x *bp)
719{
720 if (bp->port.need_hw_lock)
721 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
722
723 mutex_unlock(&bp->port.phy_mutex);
724}
725
726void bnx2x_link_report(struct bnx2x *bp)
727{
728 if (bp->flags & MF_FUNC_DIS) {
729 netif_carrier_off(bp->dev);
730 netdev_err(bp->dev, "NIC Link is Down\n");
731 return;
732 }
733
734 if (bp->link_vars.link_up) {
735 u16 line_speed;
736
737 if (bp->state == BNX2X_STATE_OPEN)
738 netif_carrier_on(bp->dev);
739 netdev_info(bp->dev, "NIC Link is Up, ");
740
741 line_speed = bp->link_vars.line_speed;
742 if (IS_MF(bp)) {
743 u16 vn_max_rate;
744
745 vn_max_rate =
746 ((bp->mf_config[BP_VN(bp)] &
747 FUNC_MF_CFG_MAX_BW_MASK) >>
748 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
749 if (vn_max_rate < line_speed)
750 line_speed = vn_max_rate;
751 }
752 pr_cont("%d Mbps ", line_speed);
753
754 if (bp->link_vars.duplex == DUPLEX_FULL)
755 pr_cont("full duplex");
756 else
757 pr_cont("half duplex");
758
759 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
760 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
761 pr_cont(", receive ");
762 if (bp->link_vars.flow_ctrl &
763 BNX2X_FLOW_CTRL_TX)
764 pr_cont("& transmit ");
765 } else {
766 pr_cont(", transmit ");
767 }
768 pr_cont("flow control ON");
769 }
770 pr_cont("\n");
771
772 } else { /* link_down */
773 netif_carrier_off(bp->dev);
774 netdev_err(bp->dev, "NIC Link is Down\n");
775 }
776}
777
778/* Returns the number of actually allocated BDs */
779static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
780 int rx_ring_size)
781{
782 struct bnx2x *bp = fp->bp;
783 u16 ring_prod, cqe_ring_prod;
784 int i;
785
786 fp->rx_comp_cons = 0;
787 cqe_ring_prod = ring_prod = 0;
788 for (i = 0; i < rx_ring_size; i++) {
789 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
790 BNX2X_ERR("was only able to allocate "
791 "%d rx skbs on queue[%d]\n", i, fp->index);
792 fp->eth_q_stats.rx_skb_alloc_failed++;
793 break;
794 }
795 ring_prod = NEXT_RX_IDX(ring_prod);
796 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
797 WARN_ON(ring_prod <= i);
798 }
799
800 fp->rx_bd_prod = ring_prod;
801 /* Limit the CQE producer by the CQE ring size */
802 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
803 cqe_ring_prod);
804 fp->rx_pkt = fp->rx_calls = 0;
805
806 return i;
807}
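/* NEXT_RX_IDX()/NEXT_RCQ_IDX() skip the "next page" descriptors at the end of
 * each BD page, so ring_prod can legitimately run ahead of the loop counter i;
 * the WARN_ON(ring_prod <= i) above is a cheap check of that invariant. */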
808
809static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
810{
811 struct bnx2x *bp = fp->bp;
812 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
813 MAX_RX_AVAIL/bp->num_queues;
814
815 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
816
817 bnx2x_alloc_rx_bds(fp, rx_ring_size);
818
819 /* Warning!
820 * this will generate an interrupt (to the TSTORM)
821 * must only be done after chip is initialized
822 */
823 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
824 fp->rx_sge_prod);
825}
826
827void bnx2x_init_rx_rings(struct bnx2x *bp)
828{
829 int func = BP_FUNC(bp);
830 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
831 ETH_MAX_AGGREGATION_QUEUES_E1H;
832 u16 ring_prod;
833 int i, j;
834
835 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
836 BNX2X_FW_IP_HDR_ALIGN_PAD;
837
838 DP(NETIF_MSG_IFUP,
839 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
840
841 for_each_queue(bp, j) {
842 struct bnx2x_fastpath *fp = &bp->fp[j];
843
844 if (!fp->disable_tpa) {
845 for (i = 0; i < max_agg_queues; i++) {
846 fp->tpa_pool[i].skb =
847 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
848 if (!fp->tpa_pool[i].skb) {
849 BNX2X_ERR("Failed to allocate TPA "
850 "skb pool for queue[%d] - "
851 "disabling TPA on this "
852 "queue!\n", j);
853 bnx2x_free_tpa_pool(bp, fp, i);
854 fp->disable_tpa = 1;
855 break;
856 }
857 dma_unmap_addr_set((struct sw_rx_bd *)
858 &bp->fp->tpa_pool[i],
859 mapping, 0);
860 fp->tpa_state[i] = BNX2X_TPA_STOP;
861 }
862
863 /* "next page" elements initialization */
864 bnx2x_set_next_page_sgl(fp);
865
866 /* set SGEs bit mask */
867 bnx2x_init_sge_ring_bit_mask(fp);
868
869 /* Allocate SGEs and initialize the ring elements */
870 for (i = 0, ring_prod = 0;
871 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
872
873 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
874 BNX2X_ERR("was only able to allocate "
875 "%d rx sges\n", i);
876 BNX2X_ERR("disabling TPA for"
877 " queue[%d]\n", j);
878 /* Cleanup already allocated elements */
879 bnx2x_free_rx_sge_range(bp,
880 fp, ring_prod);
881 bnx2x_free_tpa_pool(bp,
882 fp, max_agg_queues);
883 fp->disable_tpa = 1;
884 ring_prod = 0;
885 break;
886 }
887 ring_prod = NEXT_SGE_IDX(ring_prod);
888 }
889
890 fp->rx_sge_prod = ring_prod;
891 }
892 }
893
894 for_each_queue(bp, j) {
895 struct bnx2x_fastpath *fp = &bp->fp[j];
896
897 fp->rx_bd_cons = 0;
898
899 bnx2x_set_next_page_rx_bd(fp);
900
901 /* CQ ring */
902 bnx2x_set_next_page_rx_cq(fp);
903
904 /* Allocate BDs and initialize BD ring */
905 bnx2x_alloc_rx_bd_ring(fp);
906
907 if (j != 0)
908 continue;
909
910 if (!CHIP_IS_E2(bp)) {
911 REG_WR(bp, BAR_USTRORM_INTMEM +
912 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
913 U64_LO(fp->rx_comp_mapping));
914 REG_WR(bp, BAR_USTRORM_INTMEM +
915 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
916 U64_HI(fp->rx_comp_mapping));
917 }
918 }
919}
920static void bnx2x_free_tx_skbs(struct bnx2x *bp)
921{
922 int i;
923
924 for_each_queue(bp, i) {
925 struct bnx2x_fastpath *fp = &bp->fp[i];
926
927 u16 bd_cons = fp->tx_bd_cons;
928 u16 sw_prod = fp->tx_pkt_prod;
929 u16 sw_cons = fp->tx_pkt_cons;
930
931 while (sw_cons != sw_prod) {
932 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
933 sw_cons++;
934 }
935 }
936}
937
938static void bnx2x_free_rx_skbs(struct bnx2x *bp)
939{
940 int i, j;
941
942 for_each_queue(bp, j) {
943 struct bnx2x_fastpath *fp = &bp->fp[j];
944
945 for (i = 0; i < NUM_RX_BD; i++) {
946 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
947 struct sk_buff *skb = rx_buf->skb;
948
949 if (skb == NULL)
950 continue;
951
952 dma_unmap_single(&bp->pdev->dev,
953 dma_unmap_addr(rx_buf, mapping),
954 bp->rx_buf_size, DMA_FROM_DEVICE);
955
956 rx_buf->skb = NULL;
957 dev_kfree_skb(skb);
958 }
959 if (!fp->disable_tpa)
960 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
961 ETH_MAX_AGGREGATION_QUEUES_E1 :
962 ETH_MAX_AGGREGATION_QUEUES_E1H);
963 }
964}
965
966void bnx2x_free_skbs(struct bnx2x *bp)
967{
968 bnx2x_free_tx_skbs(bp);
969 bnx2x_free_rx_skbs(bp);
970}
971
972static void bnx2x_free_msix_irqs(struct bnx2x *bp)
973{
974 int i, offset = 1;
975
976 free_irq(bp->msix_table[0].vector, bp->dev);
977 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
978 bp->msix_table[0].vector);
979
980#ifdef BCM_CNIC
981 offset++;
982#endif
983 for_each_queue(bp, i) {
984 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
985 "state %x\n", i, bp->msix_table[i + offset].vector,
986 bnx2x_fp(bp, i, state));
987
988 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
989 }
990}
991
992void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
993{
994 if (bp->flags & USING_MSIX_FLAG) {
995 if (!disable_only)
996 bnx2x_free_msix_irqs(bp);
997 pci_disable_msix(bp->pdev);
998 bp->flags &= ~USING_MSIX_FLAG;
999
1000 } else if (bp->flags & USING_MSI_FLAG) {
1001 if (!disable_only)
1002 free_irq(bp->pdev->irq, bp->dev);
1003 pci_disable_msi(bp->pdev);
1004 bp->flags &= ~USING_MSI_FLAG;
1005
1006 } else if (!disable_only)
1007 free_irq(bp->pdev->irq, bp->dev);
1008}
1009
1010static int bnx2x_enable_msix(struct bnx2x *bp)
1011{
1012 int i, rc, offset = 1;
1013 int igu_vec = 0;
1014
1015 bp->msix_table[0].entry = igu_vec;
1016 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
1017
1018#ifdef BCM_CNIC
1019 igu_vec = BP_L_ID(bp) + offset;
1020 bp->msix_table[1].entry = igu_vec;
1021 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
1022 offset++;
1023#endif
1024 for_each_queue(bp, i) {
1025 igu_vec = BP_L_ID(bp) + offset + i;
1026 bp->msix_table[i + offset].entry = igu_vec;
1027 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1028 "(fastpath #%u)\n", i + offset, igu_vec, i);
1029 }
1030
1031 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
1032 BNX2X_NUM_QUEUES(bp) + offset);
1033
1034 /*
1035 * reconfigure number of tx/rx queues according to available
1036 * MSI-X vectors
1037 */
1038 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1039 /* vectors available for FP */
1040 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
1041
1042 DP(NETIF_MSG_IFUP,
1043 "Trying to use less MSI-X vectors: %d\n", rc);
1044
1045 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1046
1047 if (rc) {
1048 DP(NETIF_MSG_IFUP,
1049 "MSI-X is not attainable rc %d\n", rc);
1050 return rc;
1051 }
1052
1053 bp->num_queues = min(bp->num_queues, fp_vec);
1054
1055 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1056 bp->num_queues);
1057 } else if (rc) {
1058 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1059 return rc;
1060 }
1061
1062 bp->flags |= USING_MSIX_FLAG;
1063
1064 return 0;
1065}
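/* MSI-X vector layout assumed here and in bnx2x_req_msix_irqs(): entry 0 is
 * the slow-path interrupt, one optional entry follows for CNIC when BCM_CNIC
 * is defined, and the remaining entries map one vector per fast-path queue. */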
1066
1067static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1068{
1069 int i, rc, offset = 1;
1070
1071 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1072 bp->dev->name, bp->dev);
1073 if (rc) {
1074 BNX2X_ERR("request sp irq failed\n");
1075 return -EBUSY;
1076 }
1077
1078#ifdef BCM_CNIC
1079 offset++;
1080#endif
1081 for_each_queue(bp, i) {
1082 struct bnx2x_fastpath *fp = &bp->fp[i];
1083 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1084 bp->dev->name, i);
1085
1086 rc = request_irq(bp->msix_table[i + offset].vector,
1087 bnx2x_msix_fp_int, 0, fp->name, fp);
1088 if (rc) {
1089 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1090 bnx2x_free_msix_irqs(bp);
1091 return -EBUSY;
1092 }
1093
1094 fp->state = BNX2X_FP_STATE_IRQ;
1095 }
1096
1097 i = BNX2X_NUM_QUEUES(bp);
1098 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1099 " ... fp[%d] %d\n",
1100 bp->msix_table[0].vector,
1101 0, bp->msix_table[offset].vector,
1102 i - 1, bp->msix_table[offset + i - 1].vector);
1103
1104 return 0;
1105}
1106
1107static int bnx2x_enable_msi(struct bnx2x *bp)
1108{
1109 int rc;
1110
1111 rc = pci_enable_msi(bp->pdev);
1112 if (rc) {
1113 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1114 return -1;
1115 }
1116 bp->flags |= USING_MSI_FLAG;
1117
1118 return 0;
1119}
1120
1121static int bnx2x_req_irq(struct bnx2x *bp)
1122{
1123 unsigned long flags;
1124 int rc;
1125
1126 if (bp->flags & USING_MSI_FLAG)
1127 flags = 0;
1128 else
1129 flags = IRQF_SHARED;
1130
1131 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1132 bp->dev->name, bp->dev);
1133 if (!rc)
1134 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1135
1136 return rc;
1137}
1138
1139static void bnx2x_napi_enable(struct bnx2x *bp)
1140{
1141 int i;
1142
1143 for_each_queue(bp, i)
1144 napi_enable(&bnx2x_fp(bp, i, napi));
1145}
1146
1147static void bnx2x_napi_disable(struct bnx2x *bp)
1148{
1149 int i;
1150
1151 for_each_queue(bp, i)
1152 napi_disable(&bnx2x_fp(bp, i, napi));
1153}
1154
1155void bnx2x_netif_start(struct bnx2x *bp)
1156{
1157 int intr_sem;
1158
1159 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1160 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1161
1162 if (intr_sem) {
1163 if (netif_running(bp->dev)) {
1164 bnx2x_napi_enable(bp);
1165 bnx2x_int_enable(bp);
1166 if (bp->state == BNX2X_STATE_OPEN)
1167 netif_tx_wake_all_queues(bp->dev);
1168 }
1169 }
1170}
1171
1172void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1173{
1174 bnx2x_int_disable_sync(bp, disable_hw);
1175 bnx2x_napi_disable(bp);
1176 netif_tx_disable(bp->dev);
1177}
1178static int bnx2x_set_num_queues(struct bnx2x *bp)
1179{
1180 int rc = 0;
1181
1182 switch (bp->int_mode) {
1183 case INT_MODE_MSI:
1184 bnx2x_enable_msi(bp);
1185 /* falling through... */
1186 case INT_MODE_INTx:
1187 bp->num_queues = 1;
1188 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
1189 break;
1190 default:
1191 /* Set number of queues according to bp->multi_mode value */
1192 bnx2x_set_num_queues_msix(bp);
1193
1194 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
1195 bp->num_queues);
1196
1197 /* if we can't use MSI-X we only need one fp,
1198 * so try to enable MSI-X with the requested number of fp's
1199 * and fallback to MSI or legacy INTx with one fp
1200 */
1201 rc = bnx2x_enable_msix(bp);
1202 if (rc) {
1203 /* failed to enable MSI-X */
1204 bp->num_queues = 1;
1205
1206 /* Fall to INTx if failed to enable MSI-X due to lack of
1207 * memory (in bnx2x_set_num_queues()) */
1208 if ((rc != -ENOMEM) && (bp->int_mode != INT_MODE_INTx))
1209 bnx2x_enable_msi(bp);
1210 }
1211
1212 break;
1213 }
1214 netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1215 return netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1216}
1217
1218static void bnx2x_release_firmware(struct bnx2x *bp)
1219{
1220 kfree(bp->init_ops_offsets);
1221 kfree(bp->init_ops);
1222 kfree(bp->init_data);
1223 release_firmware(bp->firmware);
1224}
1225
1226/* must be called with rtnl_lock */
1227int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1228{
1229 u32 load_code;
1230 int i, rc;
1231
1232 /* Set init arrays */
1233 rc = bnx2x_init_firmware(bp);
1234 if (rc) {
1235 BNX2X_ERR("Error loading firmware\n");
1236 return rc;
1237 }
1238
1239#ifdef BNX2X_STOP_ON_ERROR
1240 if (unlikely(bp->panic))
1241 return -EPERM;
1242#endif
1243
1244 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1245
1246 rc = bnx2x_set_num_queues(bp);
1247 if (rc)
1248 return rc;
1249
1250 /* must be called before memory allocation and HW init */
1251 bnx2x_ilt_set_info(bp);
1252
1253 if (bnx2x_alloc_mem(bp)) {
1254 bnx2x_free_irq(bp, true);
1255 return -ENOMEM;
1256 }
1257
1258 for_each_queue(bp, i)
1259 bnx2x_fp(bp, i, disable_tpa) =
1260 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1261
1262 for_each_queue(bp, i)
1263 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
1264 bnx2x_poll, 128);
1265
1266 bnx2x_napi_enable(bp);
1267
1268 if (bp->flags & USING_MSIX_FLAG) {
1269 rc = bnx2x_req_msix_irqs(bp);
1270 if (rc) {
1271 bnx2x_free_irq(bp, true);
1272 goto load_error1;
1273 }
1274 } else {
1275 bnx2x_ack_int(bp);
1276 rc = bnx2x_req_irq(bp);
1277 if (rc) {
1278 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1279 bnx2x_free_irq(bp, true);
1280 goto load_error1;
1281 }
1282 if (bp->flags & USING_MSI_FLAG) {
1283 bp->dev->irq = bp->pdev->irq;
1284 netdev_info(bp->dev, "using MSI IRQ %d\n",
1285 bp->pdev->irq);
1286 }
1287 }
1288
1289 /* Send LOAD_REQUEST command to MCP
1290 Returns the type of LOAD command:
1291 if it is the first port to be initialized
1292 common blocks should be initialized, otherwise - not
1293 */
1294 if (!BP_NOMCP(bp)) {
1295 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1296 if (!load_code) {
1297 BNX2X_ERR("MCP response failure, aborting\n");
1298 rc = -EBUSY;
1299 goto load_error2;
1300 }
1301 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1302 rc = -EBUSY; /* other port in diagnostic mode */
1303 goto load_error2;
1304 }
1305
1306 } else {
1307 int path = BP_PATH(bp);
1308 int port = BP_PORT(bp);
1309
1310 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1311 path, load_count[path][0], load_count[path][1],
1312 load_count[path][2]);
1313 load_count[path][0]++;
1314 load_count[path][1 + port]++;
1315 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1316 path, load_count[path][0], load_count[path][1],
1317 load_count[path][2]);
1318 if (load_count[path][0] == 1)
1319 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1320 else if (load_count[path][1 + port] == 1)
1321 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1322 else
1323 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1324 }
1325
1326 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1327 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1328 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1329 bp->port.pmf = 1;
1330 else
1331 bp->port.pmf = 0;
1332 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1333
1334 /* Initialize HW */
1335 rc = bnx2x_init_hw(bp, load_code);
1336 if (rc) {
1337 BNX2X_ERR("HW init failed, aborting\n");
1338 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1339 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1340 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1341 goto load_error2;
1342 }
1343
1344 if (rc) {
1345 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1346 goto load_error2;
1347 }
1348
1349 /* Setup NIC internals and enable interrupts */
1350 bnx2x_nic_init(bp, load_code);
1351
1352 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1353 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1354 (bp->common.shmem2_base))
1355 SHMEM2_WR(bp, dcc_support,
1356 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1357 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1358
1359 /* Send LOAD_DONE command to MCP */
1360 if (!BP_NOMCP(bp)) {
1361 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1362 if (!load_code) {
1363 BNX2X_ERR("MCP response failure, aborting\n");
1364 rc = -EBUSY;
1365 goto load_error3;
1366 }
1367 }
1368
1369 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1370
1371 rc = bnx2x_func_start(bp);
1372 if (rc) {
1373 BNX2X_ERR("Function start failed!\n");
1374#ifndef BNX2X_STOP_ON_ERROR
1375 goto load_error3;
1376#else
1377 bp->panic = 1;
1378 return -EBUSY;
1379#endif
1380 }
1381
1382 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1383 if (rc) {
1384 BNX2X_ERR("Setup leading failed!\n");
1385#ifndef BNX2X_STOP_ON_ERROR
1386 goto load_error3;
1387#else
1388 bp->panic = 1;
1389 return -EBUSY;
1390#endif
1391 }
1392
1393 if (!CHIP_IS_E1(bp) &&
1394 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1395 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1396 bp->flags |= MF_FUNC_DIS;
1397 }
1398
1399#ifdef BCM_CNIC
1400 /* Enable Timer scan */
1401 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1402#endif
1403 for_each_nondefault_queue(bp, i) {
1404 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1405 if (rc)
1406#ifdef BCM_CNIC
1407 goto load_error4;
1408#else
1409 goto load_error3;
1410#endif
1411 }
1412
1413 /* Now when Clients are configured we are ready to work */
1414 bp->state = BNX2X_STATE_OPEN;
1415
1416 bnx2x_set_eth_mac(bp, 1);
1417
1418#ifdef BCM_CNIC
1419 /* Set iSCSI L2 MAC */
1420 mutex_lock(&bp->cnic_mutex);
1421 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
1422 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
1423 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
1424 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
1425 BNX2X_VF_ID_INVALID, false,
1426 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
1427 }
1428 mutex_unlock(&bp->cnic_mutex);
1429#endif
1430
1431 if (bp->port.pmf)
1432 bnx2x_initial_phy_init(bp, load_mode);
1433
1434 /* Start fast path */
1435 switch (load_mode) {
1436 case LOAD_NORMAL:
1437 /* Tx queue should only be re-enabled */
1438 netif_tx_wake_all_queues(bp->dev);
1439 /* Initialize the receive filter. */
1440 bnx2x_set_rx_mode(bp->dev);
1441 break;
1442
1443 case LOAD_OPEN:
1444 netif_tx_start_all_queues(bp->dev);
1445 smp_mb__after_clear_bit();
1446 /* Initialize the receive filter. */
1447 bnx2x_set_rx_mode(bp->dev);
1448 break;
1449
1450 case LOAD_DIAG:
1451 /* Initialize the receive filter. */
1452 bnx2x_set_rx_mode(bp->dev);
1453 bp->state = BNX2X_STATE_DIAG;
1454 break;
1455
1456 default:
1457 break;
1458 }
1459
1460 if (!bp->port.pmf)
1461 bnx2x__link_status_update(bp);
1462
1463 /* start the timer */
1464 mod_timer(&bp->timer, jiffies + bp->current_interval);
1465
1466#ifdef BCM_CNIC
1467 bnx2x_setup_cnic_irq_info(bp);
1468 if (bp->state == BNX2X_STATE_OPEN)
1469 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1470#endif
1471 bnx2x_inc_load_cnt(bp);
1472
1473 bnx2x_release_firmware(bp);
1474
1475 return 0;
1476
1477#ifdef BCM_CNIC
1478load_error4:
1479 /* Disable Timer scan */
1480 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1481#endif
1482load_error3:
1483 bnx2x_int_disable_sync(bp, 1);
1484 if (!BP_NOMCP(bp)) {
1485 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1486 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1487 }
1488 bp->port.pmf = 0;
1489 /* Free SKBs, SGEs, TPA pool and driver internals */
1490 bnx2x_free_skbs(bp);
1491 for_each_queue(bp, i)
1492 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1493load_error2:
1494 /* Release IRQs */
1495 bnx2x_free_irq(bp, false);
1496load_error1:
1497 bnx2x_napi_disable(bp);
1498 for_each_queue(bp, i)
1499 netif_napi_del(&bnx2x_fp(bp, i, napi));
1500 bnx2x_free_mem(bp);
1501
1502 bnx2x_release_firmware(bp);
1503
1504 return rc;
1505}
1506
1507/* must be called with rtnl_lock */
1508int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1509{
1510 int i;
1511
1512 if (bp->state == BNX2X_STATE_CLOSED) {
1513 /* Interface has been removed - nothing to recover */
1514 bp->recovery_state = BNX2X_RECOVERY_DONE;
1515 bp->is_leader = 0;
1516 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1517 smp_wmb();
1518
1519 return -EINVAL;
1520 }
1521
1522#ifdef BCM_CNIC
1523 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1524#endif
1525 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1526
1527 /* Set "drop all" */
1528 bp->rx_mode = BNX2X_RX_MODE_NONE;
1529 bnx2x_set_storm_rx_mode(bp);
1530
1531 /* Stop Tx */
1532 bnx2x_tx_disable(bp);
1533 del_timer_sync(&bp->timer);
1534 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1535 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1536 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1537
1538
1539 /* Cleanup the chip if needed */
1540 if (unload_mode != UNLOAD_RECOVERY)
1541 bnx2x_chip_cleanup(bp, unload_mode);
1542 else {
1543 /* Disable HW interrupts, NAPI and Tx */
1544 bnx2x_netif_stop(bp, 1);
1545
1546 /* Release IRQs */
1547 bnx2x_free_irq(bp, false);
1548 }
1549
1550 bp->port.pmf = 0;
1551
1552 /* Free SKBs, SGEs, TPA pool and driver internals */
1553 bnx2x_free_skbs(bp);
1554 for_each_queue(bp, i)
1555 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1556 for_each_queue(bp, i)
1557 netif_napi_del(&bnx2x_fp(bp, i, napi));
1558 bnx2x_free_mem(bp);
1559
1560 bp->state = BNX2X_STATE_CLOSED;
1561
1562 /* The last driver must disable a "close the gate" if there is no
1563 * parity attention or "process kill" pending.
1564 */
1565 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1566 bnx2x_reset_is_done(bp))
1567 bnx2x_disable_close_the_gate(bp);
1568
1569 /* Reset the MCP mailbox sequence if there is ongoing recovery */
1570 if (unload_mode == UNLOAD_RECOVERY)
1571 bp->fw_seq = 0;
1572
1573 return 0;
1574}
1575int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1576{
1577 u16 pmcsr;
1578
1579 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1580
1581 switch (state) {
1582 case PCI_D0:
1583 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1584 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1585 PCI_PM_CTRL_PME_STATUS));
1586
1587 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1588 /* delay required during transition out of D3hot */
1589 msleep(20);
1590 break;
1591
1592 case PCI_D3hot:
1593 /* If there are other clients above don't
1594 shut down the power */
1595 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1596 return 0;
1597 /* Don't shut down the power for emulation and FPGA */
1598 if (CHIP_REV_IS_SLOW(bp))
1599 return 0;
1600
1601 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1602 pmcsr |= 3;
1603
1604 if (bp->wol)
1605 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1606
1607 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1608 pmcsr);
1609
1610 /* No more memory access after this point until
1611 * device is brought back to D0.
1612 */
1613 break;
1614
1615 default:
1616 return -EINVAL;
1617 }
1618 return 0;
1619}
1620
1621
1622
1623/*
1624 * net_device service functions
1625 */
1626
1627static int bnx2x_poll(struct napi_struct *napi, int budget)
1628{
1629 int work_done = 0;
1630 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1631 napi);
1632 struct bnx2x *bp = fp->bp;
1633
1634 while (1) {
1635#ifdef BNX2X_STOP_ON_ERROR
1636 if (unlikely(bp->panic)) {
1637 napi_complete(napi);
1638 return 0;
1639 }
1640#endif
1641
1642 if (bnx2x_has_tx_work(fp))
1643 bnx2x_tx_int(fp);
1644
1645 if (bnx2x_has_rx_work(fp)) {
1646 work_done += bnx2x_rx_int(fp, budget - work_done);
1647
1648 /* must not complete if we consumed full budget */
1649 if (work_done >= budget)
1650 break;
1651 }
1652
1653 /* Fall out from the NAPI loop if needed */
1654 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1655 bnx2x_update_fpsb_idx(fp);
1656 /* bnx2x_has_rx_work() reads the status block,
1657 * thus we need to ensure that status block indices
1658 * have been actually read (bnx2x_update_fpsb_idx)
1659 * prior to this check (bnx2x_has_rx_work) so that
1660 * we won't write the "newer" value of the status block
1661 * to IGU (if there was a DMA right after
1662 * bnx2x_has_rx_work and if there is no rmb, the memory
1663 * reading (bnx2x_update_fpsb_idx) may be postponed
1664 * to right before bnx2x_ack_sb). In this case there
1665 * will never be another interrupt until there is
1666 * another update of the status block, while there
1667 * is still unhandled work.
1668 */
1669 rmb();
1670
1671 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1672 napi_complete(napi);
1673 /* Re-enable interrupts */
1674 DP(NETIF_MSG_HW,
1675 "Update index to %d\n", fp->fp_hc_idx);
1676 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1677 le16_to_cpu(fp->fp_hc_idx),
1678 IGU_INT_ENABLE, 1);
1679 break;
1680 }
1681 }
1682 }
1683
1684 return work_done;
1685}
1686
1687
1688/* we split the first BD into headers and data BDs
1689 * to ease the pain of our fellow microcode engineers
1690 * we use one mapping for both BDs
1691 * So far this has only been observed to happen
1692 * in Other Operating Systems(TM)
1693 */
1694static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1695 struct bnx2x_fastpath *fp,
1696 struct sw_tx_bd *tx_buf,
1697 struct eth_tx_start_bd **tx_bd, u16 hlen,
1698 u16 bd_prod, int nbd)
1699{
1700 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1701 struct eth_tx_bd *d_tx_bd;
1702 dma_addr_t mapping;
1703 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1704
1705 /* first fix first BD */
1706 h_tx_bd->nbd = cpu_to_le16(nbd);
1707 h_tx_bd->nbytes = cpu_to_le16(hlen);
1708
1709 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1710 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1711 h_tx_bd->addr_lo, h_tx_bd->nbd);
1712
1713 /* now get a new data BD
1714 * (after the pbd) and fill it */
1715 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1716 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1717
1718 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1719 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1720
1721 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1722 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1723 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1724
1725 /* this marks the BD as one that has no individual mapping */
1726 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1727
1728 DP(NETIF_MSG_TX_QUEUED,
1729 "TSO split data size is %d (%x:%x)\n",
1730 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1731
1732 /* update tx_bd */
1733 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1734
1735 return bd_prod;
1736}
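/* After bnx2x_tx_split() the start BD covers only the hlen header bytes and a
 * new data BD covers the rest of the same mapping; e.g. a 1514-byte linear
 * buffer with hlen == 66 becomes BDs of 66 and 1448 bytes sharing one DMA
 * mapping, which is why BNX2X_TSO_SPLIT_BD suppresses the extra unmap. */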
1737
1738static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1739{
1740 if (fix > 0)
1741 csum = (u16) ~csum_fold(csum_sub(csum,
1742 csum_partial(t_header - fix, fix, 0)));
1743
1744 else if (fix < 0)
1745 csum = (u16) ~csum_fold(csum_add(csum,
1746 csum_partial(t_header, -fix, 0)));
1747
1748 return swab16(csum);
1749}
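/* bnx2x_csum_fix() shifts the checksum the stack computed at its csum_start to
 * cover exactly the bytes from the transport header onwards: a positive fix
 * folds out the extra leading bytes, a negative fix folds the missing ones in,
 * and the result is byte-swapped into the format the parsing BD expects. */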
1750
1751static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1752{
1753 u32 rc;
1754
1755 if (skb->ip_summed != CHECKSUM_PARTIAL)
1756 rc = XMIT_PLAIN;
1757
1758 else {
1759 if (skb->protocol == htons(ETH_P_IPV6)) {
1760 rc = XMIT_CSUM_V6;
1761 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1762 rc |= XMIT_CSUM_TCP;
1763
1764 } else {
1765 rc = XMIT_CSUM_V4;
1766 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1767 rc |= XMIT_CSUM_TCP;
1768 }
1769 }
1770
1771 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1772 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1773
1774 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1775 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1776
1777 return rc;
1778}
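/* xmit_type is a bit mask over XMIT_PLAIN, XMIT_CSUM_V4/V6, XMIT_CSUM_TCP and
 * the XMIT_GSO_* flags; e.g. a TSO IPv4 segment yields
 * XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP, which is what the PBD helpers
 * below key off when filling the parsing BD. */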
1779
1780#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1781/* check if packet requires linearization (packet is too fragmented)
1782 no need to check fragmentation if page size > 8K (there will be no
1783 violation of FW restrictions) */
1784static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1785 u32 xmit_type)
1786{
1787 int to_copy = 0;
1788 int hlen = 0;
1789 int first_bd_sz = 0;
1790
1791 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1792 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1793
1794 if (xmit_type & XMIT_GSO) {
1795 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1796 /* Check if LSO packet needs to be copied:
1797 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1798 int wnd_size = MAX_FETCH_BD - 3;
1799 /* Number of windows to check */
1800 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1801 int wnd_idx = 0;
1802 int frag_idx = 0;
1803 u32 wnd_sum = 0;
1804
1805 /* Headers length */
1806 hlen = (int)(skb_transport_header(skb) - skb->data) +
1807 tcp_hdrlen(skb);
1808
1809 /* Amount of data (w/o headers) on linear part of SKB*/
1810 first_bd_sz = skb_headlen(skb) - hlen;
1811
1812 wnd_sum = first_bd_sz;
1813
1814 /* Calculate the first sum - it's special */
1815 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1816 wnd_sum +=
1817 skb_shinfo(skb)->frags[frag_idx].size;
1818
1819 /* If there was data on linear skb data - check it */
1820 if (first_bd_sz > 0) {
1821 if (unlikely(wnd_sum < lso_mss)) {
1822 to_copy = 1;
1823 goto exit_lbl;
1824 }
1825
1826 wnd_sum -= first_bd_sz;
1827 }
1828
1829 /* Others are easier: run through the frag list and
1830 check all windows */
1831 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1832 wnd_sum +=
1833 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1834
1835 if (unlikely(wnd_sum < lso_mss)) {
1836 to_copy = 1;
1837 break;
1838 }
1839 wnd_sum -=
1840 skb_shinfo(skb)->frags[wnd_idx].size;
1841 }
1842 } else {
1843 /* in non-LSO too fragmented packet should always
1844 be linearized */
1845 to_copy = 1;
1846 }
1847 }
1848
1849exit_lbl:
1850 if (unlikely(to_copy))
1851 DP(NETIF_MSG_TX_QUEUED,
1852 "Linearization IS REQUIRED for %s packet. "
1853 "num_frags %d hlen %d first_bd_sz %d\n",
1854 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1855 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1856
1857 return to_copy;
1858}
1859#endif
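/* The sliding-window test above enforces the FW rule that any wnd_size
 * consecutive BDs must together carry at least one full MSS.  Worked example
 * (hypothetical sizes): with lso_mss == 1460 and ten 100-byte frags following
 * the headers, a window sums to roughly 1000 bytes, so to_copy is set and the
 * skb is linearized in bnx2x_start_xmit(). */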
1860
1861static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
1862 struct eth_tx_parse_bd_e2 *pbd,
1863 u32 xmit_type)
1864{
1865 pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
1866 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
1867 if ((xmit_type & XMIT_GSO_V6) &&
1868 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1869 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1870}
1871
1872/**
1873 * Update PBD in GSO case.
1874 *
1875 * @param skb
1876 * @param tx_start_bd
1877 * @param pbd
1878 * @param xmit_type
1879 */
1880static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1881 struct eth_tx_parse_bd_e1x *pbd,
1882 u32 xmit_type)
1883{
1884 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1885 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1886 pbd->tcp_flags = pbd_tcp_flags(skb);
1887
1888 if (xmit_type & XMIT_GSO_V4) {
1889 pbd->ip_id = swab16(ip_hdr(skb)->id);
1890 pbd->tcp_pseudo_csum =
1891 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1892 ip_hdr(skb)->daddr,
1893 0, IPPROTO_TCP, 0));
1894
1895 } else
1896 pbd->tcp_pseudo_csum =
1897 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1898 &ipv6_hdr(skb)->daddr,
1899 0, IPPROTO_TCP, 0));
1900
1901 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1902}
1903/**
1904 *
1905 * @param skb
1906 * @param tx_start_bd
1907 * @param pbd_e2
1908 * @param xmit_type
1909 *
1910 * @return header len
1911 */
1912static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1913 struct eth_tx_parse_bd_e2 *pbd,
1914 u32 xmit_type)
1915{
1916 pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
1917 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
1918
1919 pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
1920 skb->data) / 2) <<
1921 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
1922
1923 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1924}
1925
1926/**
1927 *
1928 * @param skb
1929 * @param tx_start_bd
1930 * @param pbd
1931 * @param xmit_type
1932 *
1933 * @return Header length
1934 */
1935static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1936 struct eth_tx_parse_bd_e1x *pbd,
1937 u32 xmit_type)
1938{
1939 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1940
1941 /* for now NS flag is not used in Linux */
1942 pbd->global_data =
1943 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1944 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1945
1946 pbd->ip_hlen_w = (skb_transport_header(skb) -
1947 skb_network_header(skb)) / 2;
1948
1949 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1950
1951 pbd->total_hlen_w = cpu_to_le16(hlen);
1952 hlen = hlen*2;
1953
1954 if (xmit_type & XMIT_CSUM_TCP) {
1955 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1956
1957 } else {
1958 s8 fix = SKB_CS_OFF(skb); /* signed! */
1959
1960 DP(NETIF_MSG_TX_QUEUED,
1961 "hlen %d fix %d csum before fix %x\n",
1962 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1963
1964 /* HW bug: fixup the CSUM */
1965 pbd->tcp_pseudo_csum =
1966 bnx2x_csum_fix(skb_transport_header(skb),
1967 SKB_CS(skb), fix);
1968
1969 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1970 pbd->tcp_pseudo_csum);
1971 }
1972
1973 return hlen;
1974}
1975/* called with netif_tx_lock
1976 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1977 * netif_wake_queue()
1978 */
1979netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1980{
1981 struct bnx2x *bp = netdev_priv(dev);
1982 struct bnx2x_fastpath *fp;
1983 struct netdev_queue *txq;
1984 struct sw_tx_bd *tx_buf;
1985 struct eth_tx_start_bd *tx_start_bd;
1986 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
1987 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
1988 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
1989 u16 pkt_prod, bd_prod;
1990 int nbd, fp_index;
1991 dma_addr_t mapping;
1992 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1993 int i;
1994 u8 hlen = 0;
1995 __le16 pkt_size = 0;
1996 struct ethhdr *eth;
1997 u8 mac_type = UNICAST_ADDRESS;
1998
1999#ifdef BNX2X_STOP_ON_ERROR
2000 if (unlikely(bp->panic))
2001 return NETDEV_TX_BUSY;
2002#endif
2003
2004 fp_index = skb_get_queue_mapping(skb);
2005 txq = netdev_get_tx_queue(dev, fp_index);
2006
2007 fp = &bp->fp[fp_index];
2008
2009 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2010 fp->eth_q_stats.driver_xoff++;
2011 netif_tx_stop_queue(txq);
2012 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2013 return NETDEV_TX_BUSY;
2014 }
2015
2016	DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2017	   "protocol(%x,%x) gso type %x xmit_type %x\n",
2018	   fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2019	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2020
2021 eth = (struct ethhdr *)skb->data;
2022
2023 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2024 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2025 if (is_broadcast_ether_addr(eth->h_dest))
2026 mac_type = BROADCAST_ADDRESS;
2027 else
2028 mac_type = MULTICAST_ADDRESS;
2029 }
2030
2031#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2032 /* First, check if we need to linearize the skb (due to FW
2033 restrictions). No need to check fragmentation if page size > 8K
2034	   (there will be no violation of FW restrictions) */
2035 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2036 /* Statistics of linearization */
2037 bp->lin_cnt++;
2038 if (skb_linearize(skb) != 0) {
2039 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2040 "silently dropping this SKB\n");
2041 dev_kfree_skb_any(skb);
2042 return NETDEV_TX_OK;
2043 }
2044 }
2045#endif
2046
2047	/*
2048	Please read carefully. First we use one BD which we mark as start,
2049	then we have a parsing info BD (used for TSO or checksum offload),
2050	and only then we have the rest of the TSO BDs.
2051	(don't forget to mark the last one as last,
2052	and to unmap only AFTER you write to the BD ...)
2053	And above all, all pbd sizes are in words - NOT DWORDS!
2054	*/
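	/* Annotation - a sketch of the resulting BD chain, inferred from this
	 * function rather than from a spec: start BD (linear part) ->
	 * parse BD (E1x or E2) -> one data BD per remaining fragment, with
	 * nbd counting all of them; the header lengths written into the
	 * parse BD are in 16-bit words (bytes / 2).
	 */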
2055
2056 pkt_prod = fp->tx_pkt_prod++;
2057 bd_prod = TX_BD(fp->tx_bd_prod);
2058
2059 /* get a tx_buf and first BD */
2060 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2061 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2062
2063 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2064	SET_FLAG(tx_start_bd->general_data,
2065		 ETH_TX_START_BD_ETH_ADDR_TYPE,
2066		 mac_type);
2067	/* header nbd */
2068	SET_FLAG(tx_start_bd->general_data,
2069		 ETH_TX_START_BD_HDR_NBDS,
2070		 1);
2071
2072 /* remember the first BD of the packet */
2073 tx_buf->first_bd = fp->tx_bd_prod;
2074 tx_buf->skb = skb;
2075 tx_buf->flags = 0;
2076
2077 DP(NETIF_MSG_TX_QUEUED,
2078 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2079 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2080
2081#ifdef BCM_VLAN
2082 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
2083 (bp->flags & HW_VLAN_TX_FLAG)) {
2084		tx_start_bd->vlan_or_ethertype =
2085		    cpu_to_le16(vlan_tx_tag_get(skb));
2086		tx_start_bd->bd_flags.as_bitfield |=
2087		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2088	} else
2089#endif
2090		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2091
2092 /* turn on parsing and get a BD */
2093 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2094
2095 if (xmit_type & XMIT_CSUM) {
2096 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2097
2098 if (xmit_type & XMIT_CSUM_V4)
2099 tx_start_bd->bd_flags.as_bitfield |=
2100 ETH_TX_BD_FLAGS_IP_CSUM;
2101 else
2102 tx_start_bd->bd_flags.as_bitfield |=
2103 ETH_TX_BD_FLAGS_IPV6;
2104
2105 if (!(xmit_type & XMIT_CSUM_TCP))
2106 tx_start_bd->bd_flags.as_bitfield |=
2107 ETH_TX_BD_FLAGS_IS_UDP;
2108 }
2109
2110 if (CHIP_IS_E2(bp)) {
2111 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2112 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2113 /* Set PBD in checksum offload case */
2114 if (xmit_type & XMIT_CSUM)
2115 hlen = bnx2x_set_pbd_csum_e2(bp,
2116 skb, pbd_e2, xmit_type);
2117 } else {
2118 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2119 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2120 /* Set PBD in checksum offload case */
2121 if (xmit_type & XMIT_CSUM)
2122 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2123
2124 }
2125
2126 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2127 skb_headlen(skb), DMA_TO_DEVICE);
2128
2129 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2130 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2131 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2132 tx_start_bd->nbd = cpu_to_le16(nbd);
2133 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2134 pkt_size = tx_start_bd->nbytes;
2135
2136 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2137 " nbytes %d flags %x vlan %x\n",
2138 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2139 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2140	   tx_start_bd->bd_flags.as_bitfield,
2141	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2142
2143 if (xmit_type & XMIT_GSO) {
2144
2145 DP(NETIF_MSG_TX_QUEUED,
2146 "TSO packet len %d hlen %d total len %d tso size %d\n",
2147 skb->len, hlen, skb_headlen(skb),
2148 skb_shinfo(skb)->gso_size);
2149
2150 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2151
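		/* Annotation: when the linear part carries more than just the
		 * headers, bnx2x_tx_split() below carves the headers out into
		 * their own BD - the "TSO split header" BD that the completion
		 * path (bnx2x_free_tx_pkt) skips when unmapping.
		 */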
2152 if (unlikely(skb_headlen(skb) > hlen))
2153 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2154 hlen, bd_prod, ++nbd);
2155 if (CHIP_IS_E2(bp))
2156 bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
2157 else
2158 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2159 }
2160 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2161
2162 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2163 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2164
2165 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2166 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2167 if (total_pkt_bd == NULL)
2168 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2169
2170 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2171 frag->page_offset,
2172 frag->size, DMA_TO_DEVICE);
2173
2174 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2175 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2176 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2177 le16_add_cpu(&pkt_size, frag->size);
2178
2179 DP(NETIF_MSG_TX_QUEUED,
2180 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2181 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2182 le16_to_cpu(tx_data_bd->nbytes));
2183 }
2184
2185 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2186
2187 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2188
2189	/* now send a tx doorbell, counting the next-page BD
2190	 * if the packet's BD chain contains or ends on it
2191	 */
2192 if (TX_BD_POFF(bd_prod) < nbd)
2193 nbd++;
2194
2195 if (total_pkt_bd != NULL)
2196 total_pkt_bd->total_pkt_bytes = pkt_size;
2197
2198	if (pbd_e1x)
2199		DP(NETIF_MSG_TX_QUEUED,
2200		   "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
2201		   " tcp_flags %x xsum %x seq %u hlen %u\n",
2202		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2203		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2204		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2205		   le16_to_cpu(pbd_e1x->total_hlen_w));
2206 if (pbd_e2)
2207 DP(NETIF_MSG_TX_QUEUED,
2208 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2209 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2210 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2211 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2212 pbd_e2->parsing_data);
2213 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2214
2215 /*
2216 * Make sure that the BD data is updated before updating the producer
2217 * since FW might read the BD right after the producer is updated.
2218 * This is only applicable for weak-ordered memory model archs such
2219	 * as IA-64. The following barrier is also mandatory since FW will
2220	 * assume packets must have BDs.
2221 */
2222 wmb();
2223
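	/* Annotation: fp->tx_db appears to be a union over the 32-bit doorbell
	 * word - data.prod is advanced by the BDs just queued and the whole
	 * raw value is then written to the doorbell register below (an
	 * interpretation of the code, not a datasheet quote).
	 */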
2224 fp->tx_db.data.prod += nbd;
2225 barrier();
2226	DOORBELL(bp, fp->cid, fp->tx_db.raw);
2227
2228 mmiowb();
2229
2230 fp->tx_bd_prod += nbd;
2231
2232 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2233 netif_tx_stop_queue(txq);
2234
2235		/* paired memory barrier is in bnx2x_tx_int(); we have to keep
2236		 * ordering of set_bit() in netif_tx_stop_queue() and the read
2237		 * of fp->tx_bd_cons */
2238 smp_mb();
2239
2240 fp->eth_q_stats.driver_xoff++;
2241 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2242 netif_tx_wake_queue(txq);
2243 }
2244 fp->tx_pkt++;
2245
2246 return NETDEV_TX_OK;
2247}
2248/* called with rtnl_lock */
2249int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2250{
2251 struct sockaddr *addr = p;
2252 struct bnx2x *bp = netdev_priv(dev);
2253
2254 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2255 return -EINVAL;
2256
2257 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2258 if (netif_running(dev))
2259 bnx2x_set_eth_mac(bp, 1);
2260
2261 return 0;
2262}
2263
2264void bnx2x_free_mem_bp(struct bnx2x *bp)
2265{
2266 kfree(bp->fp);
2267 kfree(bp->msix_table);
2268 kfree(bp->ilt);
2269}
2270
2271int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2272{
2273 struct bnx2x_fastpath *fp;
2274 struct msix_entry *tbl;
2275 struct bnx2x_ilt *ilt;
2276
2277 /* fp array */
2278 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2279 if (!fp)
2280 goto alloc_err;
2281 bp->fp = fp;
2282
2283 /* msix table */
2284 tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2285 GFP_KERNEL);
2286 if (!tbl)
2287 goto alloc_err;
2288 bp->msix_table = tbl;
2289
2290 /* ilt */
2291 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2292 if (!ilt)
2293 goto alloc_err;
2294 bp->ilt = ilt;
2295
2296 return 0;
2297alloc_err:
2298 bnx2x_free_mem_bp(bp);
2299 return -ENOMEM;
2300
2301}
2302
2303/* called with rtnl_lock */
2304int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2305{
2306 struct bnx2x *bp = netdev_priv(dev);
2307 int rc = 0;
2308
2309 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2310 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2311 return -EAGAIN;
2312 }
2313
2314 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2315 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2316 return -EINVAL;
2317
2318 /* This does not race with packet allocation
2319 * because the actual alloc size is
2320 * only updated as part of load
2321 */
2322 dev->mtu = new_mtu;
2323
2324 if (netif_running(dev)) {
2325 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2326 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2327 }
2328
2329 return rc;
2330}
2331
2332void bnx2x_tx_timeout(struct net_device *dev)
2333{
2334 struct bnx2x *bp = netdev_priv(dev);
2335
2336#ifdef BNX2X_STOP_ON_ERROR
2337 if (!bp->panic)
2338 bnx2x_panic();
2339#endif
2340 /* This allows the netif to be shutdown gracefully before resetting */
2341 schedule_delayed_work(&bp->reset_task, 0);
2342}
2343
2344#ifdef BCM_VLAN
2345/* called with rtnl_lock */
2346void bnx2x_vlan_rx_register(struct net_device *dev,
2347 struct vlan_group *vlgrp)
2348{
2349 struct bnx2x *bp = netdev_priv(dev);
2350
2351 bp->vlgrp = vlgrp;
2352}
2353
2354#endif
2355int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2356{
2357 struct net_device *dev = pci_get_drvdata(pdev);
2358 struct bnx2x *bp;
2359
2360 if (!dev) {
2361 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2362 return -ENODEV;
2363 }
2364 bp = netdev_priv(dev);
2365
2366 rtnl_lock();
2367
2368 pci_save_state(pdev);
2369
2370 if (!netif_running(dev)) {
2371 rtnl_unlock();
2372 return 0;
2373 }
2374
2375 netif_device_detach(dev);
2376
2377 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2378
2379 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2380
2381 rtnl_unlock();
2382
2383 return 0;
2384}
2385
2386int bnx2x_resume(struct pci_dev *pdev)
2387{
2388 struct net_device *dev = pci_get_drvdata(pdev);
2389 struct bnx2x *bp;
2390 int rc;
2391
2392 if (!dev) {
2393 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2394 return -ENODEV;
2395 }
2396 bp = netdev_priv(dev);
2397
2398 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2399 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2400 return -EAGAIN;
2401 }
2402
2403 rtnl_lock();
2404
2405 pci_restore_state(pdev);
2406
2407 if (!netif_running(dev)) {
2408 rtnl_unlock();
2409 return 0;
2410 }
2411
2412 bnx2x_set_power_state(bp, PCI_D0);
2413 netif_device_attach(dev);
2414
2415 /* Since the chip was reset, clear the FW sequence number */
2416 bp->fw_seq = 0;
2417 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2418
2419 rtnl_unlock();
2420
2421 return rc;
2422}