[BNX2]: Re-organize firmware structures.
drivers/net/bnx2.c

/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#ifdef NETIF_F_TSO
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#define BCM_TSO 1
#endif
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.4.45"
#define DRV_MODULE_RELDATE	"September 29, 2006"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};

static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};

static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();
	diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
	if (diff > MAX_TX_DESC_CNT)
		diff = (diff & MAX_TX_DESC_CNT) - 1;
	return (bp->tx_ring_size - diff);
}
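
/* Note (added for clarity, not in the original source): tx_prod and tx_cons
 * are free-running counters that TX_RING_IDX() masks into the ring.  The
 * smp_mb() pairs with the barrier in bnx2_tx_int() so that a freshly
 * updated tx_cons is seen here before bnx2_start_xmit() decides whether to
 * stop the queue.  When the masked difference wraps past the end of the
 * page it also counts the next-page chain BD, which the "- 1" backs out.
 */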

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
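
/* Note (added for clarity): these _ind accessors reach registers that are
 * not directly mapped.  Writing the target offset to
 * BNX2_PCICFG_REG_WINDOW_ADDRESS retargets a window in PCI config space,
 * and the data then moves through BNX2_PCICFG_REG_WINDOW.  The two-step
 * access is not atomic by itself, so callers must not race each other;
 * later versions of this driver add an explicit indirect lock for this.
 */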

static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
	REG_WR(bp, BNX2_CTX_DATA, val);
}

static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
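
/* Note (added for clarity): the MDIO COMM word built above is a standard
 * clause-22 frame: bits 21-25 hold the PHY address, bits 16-20 the register
 * number, and the low 16 bits the data.  START_BUSY both launches the
 * transaction and acts as the completion flag polled in the loops, with
 * hardware auto-polling parked around the access so it cannot collide.
 */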

static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
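
/* Note (added for clarity): stop/start nest via bp->intr_sem.  Every
 * bnx2_netif_stop() increments the count, masks the interrupt and waits for
 * a running handler to finish; the queue, NAPI polling and the interrupt
 * come back only when the matching bnx2_netif_start() drops the count to
 * zero, so overlapping stop/start pairs are safe.
 */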

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}

static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
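
/* Note (added for clarity): the status block and the statistics block share
 * a single consistent-DMA allocation:
 *
 *	status_blk_mapping + 0               -> struct status_block
 *	status_blk_mapping + status_blk_size -> struct statistics_block
 *
 * status_blk_size is rounded up with L1_CACHE_ALIGN() so the two blocks
 * never share a cache line.
 */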

static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}

static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
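
/* Note (added for clarity): the resolution above is the standard pause
 * matrix from Table 28B-3 of IEEE 802.3ab-1999.  With CAP = symmetric and
 * ASYM = asymmetric pause, the outcome is roughly:
 *
 *	local CAP          remote CAP        -> TX and RX pause
 *	local CAP|ASYM     remote ASYM only  -> RX pause only
 *	local ASYM only    remote CAP|ASYM   -> TX pause only
 *
 * The 1000Base-X advertisement bits are first mapped onto the copper
 * ADVERTISE_PAUSE_* encoding so one resolver serves both media types.
 */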

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) == CHIP_NUM_5708) {
				val |= BNX2_EMAC_MODE_PORT_MII_10;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}

static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}

static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}

static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}


static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}

static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
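
/* Note (added for clarity): bnx2_fw_sync() is the driver/firmware mailbox
 * handshake.  A rolling sequence number (bp->fw_wr_seq) is folded into each
 * message posted to BNX2_DRV_MB, and the firmware echoes it in BNX2_FW_MB
 * once the message has been consumed.  On timeout the driver posts a
 * FW_TIMEOUT code so the firmware can distinguish a late ack from a dead
 * driver.
 */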

static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}

static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
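
/* Note (added for clarity): on chip revisions with potentially bad internal
 * RX buffer memory, the loop above drains the entire mbuf pool and keeps
 * only buffers whose value has bit 9 clear.  Freeing packs the buffer value
 * into both halves of the word; e.g. for val = 0x12 the write is
 * (0x12 << 9) | 0x12 | 1 = 0x2413.  The bad buffers are simply never
 * returned, so the hardware cannot hand them out again.
 */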

static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}

static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}

static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 new_link_state, old_link_state;

	new_link_state = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;
	if (new_link_state != old_link_state) {
		if (new_link_state) {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		else {
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
				STATUS_ATTN_BITS_LINK_STATE);
		}
		bnx2_set_link(bp);
	}
}

static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}

static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}

static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
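
/* Note (added for clarity): the two mailbox writes at the end of
 * bnx2_rx_int() publish the refill state to the chip:
 * BNX2_L2CTX_HOST_BDIDX carries the new host producer index and
 * BNX2_L2CTX_HOST_BSEQ the running byte sequence (rx_prod_bseq).  The
 * mmiowb() keeps those MMIO writes ordered on architectures that would
 * otherwise allow them to leak past a subsequent unlock.
 */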

/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}

static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}

static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
	    bp->link_up)
		return 1;

	return 0;
}
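
/* Note (added for clarity): bnx2_has_work() closes the classic NAPI race.
 * bnx2_poll() latches status_idx, drains the rings, then only completes and
 * re-enables interrupts if neither consumer index nor the link attention
 * state moved in the meantime; otherwise it returns 1 and stays scheduled.
 */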
1960
b6016b76
MC
1961static int
1962bnx2_poll(struct net_device *dev, int *budget)
1963{
972ec0d4 1964 struct bnx2 *bp = netdev_priv(dev);
b6016b76 1965
b6016b76
MC
1966 if ((bp->status_blk->status_attn_bits &
1967 STATUS_ATTN_BITS_LINK_STATE) !=
1968 (bp->status_blk->status_attn_bits_ack &
1969 STATUS_ATTN_BITS_LINK_STATE)) {
1970
c770a65c 1971 spin_lock(&bp->phy_lock);
b6016b76 1972 bnx2_phy_int(bp);
c770a65c 1973 spin_unlock(&bp->phy_lock);
bf5295bb
MC
1974
1975 /* This is needed to take care of transient status
1976 * during link changes.
1977 */
1978 REG_WR(bp, BNX2_HC_COMMAND,
1979 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
1980 REG_RD(bp, BNX2_HC_COMMAND);
b6016b76
MC
1981 }
1982
f4e418f7 1983 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
b6016b76 1984 bnx2_tx_int(bp);
b6016b76 1985
f4e418f7 1986 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
b6016b76
MC
1987 int orig_budget = *budget;
1988 int work_done;
1989
1990 if (orig_budget > dev->quota)
1991 orig_budget = dev->quota;
6aa20a22 1992
b6016b76
MC
1993 work_done = bnx2_rx_int(bp, orig_budget);
1994 *budget -= work_done;
1995 dev->quota -= work_done;
b6016b76 1996 }
6aa20a22 1997
f4e418f7
MC
1998 bp->last_status_idx = bp->status_blk->status_idx;
1999 rmb();
2000
2001 if (!bnx2_has_work(bp)) {
b6016b76 2002 netif_rx_complete(dev);
1269a8a6
MC
2003 if (likely(bp->flags & USING_MSI_FLAG)) {
2004 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2005 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2006 bp->last_status_idx);
2007 return 0;
2008 }
2009 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2010 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2011 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2012 bp->last_status_idx);
2013
b6016b76 2014 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1269a8a6
MC
2015 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2016 bp->last_status_idx);
b6016b76
MC
2017 return 0;
2018 }
2019
2020 return 1;
2021}
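
/* NAPI note: bnx2_poll() returning 0 means all work is done and the
 * device has left the poll list, so the INT_ACK_CMD writes above must
 * re-enable interrupts first.  In the INTx case the last status index
 * is first written with MASK_INT still set and only the second write
 * unmasks, so the hardware never sees interrupts enabled with a stale
 * index.  Returning 1 keeps the device scheduled for polling.
 */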

/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        int i;

        spin_lock_bh(&bp->phy_lock);

        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (!(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
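
/* Multicast hash mapping, worked example (illustrative value): the low
 * CRC32 byte selects one of 256 hash bits spread across the 8 32-bit
 * hash registers.  For a CRC whose low byte is 0x4a:
 * bit = 0x4a, regidx = (0x4a & 0xe0) >> 5 = 2, bit & 0x1f = 10, so
 * mc_filter[2] |= (1 << 10), i.e. bit 74 of the 256-bit table.
 */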

#define FW_BUF_SIZE        0x8000

static int
bnx2_gunzip_init(struct bnx2 *bp)
{
        if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
                goto gunzip_nomem1;

        if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
                goto gunzip_nomem2;

        bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
        if (bp->strm->workspace == NULL)
                goto gunzip_nomem3;

        return 0;

gunzip_nomem3:
        kfree(bp->strm);
        bp->strm = NULL;

gunzip_nomem2:
        vfree(bp->gunzip_buf);
        bp->gunzip_buf = NULL;

gunzip_nomem1:
        printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
                            "uncompression.\n", bp->dev->name);
        return -ENOMEM;
}

static void
bnx2_gunzip_end(struct bnx2 *bp)
{
        kfree(bp->strm->workspace);

        kfree(bp->strm);
        bp->strm = NULL;

        if (bp->gunzip_buf) {
                vfree(bp->gunzip_buf);
                bp->gunzip_buf = NULL;
        }
}

static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
        int n, rc;

        /* check gzip header */
        if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
                return -EINVAL;

        n = 10;

#define FNAME        0x8
        if (zbuf[3] & FNAME)
                while ((zbuf[n++] != 0) && (n < len));

        bp->strm->next_in = zbuf + n;
        bp->strm->avail_in = len - n;
        bp->strm->next_out = bp->gunzip_buf;
        bp->strm->avail_out = FW_BUF_SIZE;

        rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
        if (rc != Z_OK)
                return rc;

        rc = zlib_inflate(bp->strm, Z_FINISH);

        *outlen = FW_BUF_SIZE - bp->strm->avail_out;
        *outbuf = bp->gunzip_buf;

        if ((rc != Z_OK) && (rc != Z_STREAM_END))
                printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
                       bp->dev->name, bp->strm->msg);

        zlib_inflateEnd(bp->strm);

        if (rc == Z_STREAM_END)
                return 0;

        return rc;
}
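
/* Note on bnx2_gunzip(): the fixed gzip member header is 10 bytes
 * (magic 0x1f 0x8b, method, flags, mtime, xfl, os).  If the FNAME flag
 * (bit 3) is set, a NUL-terminated original file name follows and is
 * skipped by the while loop above.  Inflating with a negative window
 * size (-MAX_WBITS) tells zlib to expect a raw deflate stream, since
 * the gzip wrapper has already been consumed by hand here.
 */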

static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
        u32 rv2p_proc)
{
        int i;
        u32 val;

        for (i = 0; i < rv2p_code_len; i += 8) {
                REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
                rv2p_code++;
                REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
                rv2p_code++;

                if (rv2p_proc == RV2P_PROC1) {
                        val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                        REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
                }
                else {
                        val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                        REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
        }
        else {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
        }
}
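
/* Each RV2P instruction is 64 bits wide, which is why the loop above
 * advances 8 bytes per iteration and issues one INSTR_HIGH/INSTR_LOW
 * register write pair per instruction; (i / 8) is the instruction
 * index latched by the PROCx_ADDR_CMD write that follows each pair.
 */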

static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
        u32 offset;
        u32 val;
        int rc;

        /* Halt the CPU. */
        val = REG_RD_IND(bp, cpu_reg->mode);
        val |= cpu_reg->mode_value_halt;
        REG_WR_IND(bp, cpu_reg->mode, val);
        REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

        /* Load the Text area, decompressing it first. */
        offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
        if (fw->gz_text) {
                u32 text_len;
                void *text;
                int j;

                rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
                                 &text_len);
                if (rc)
                        return rc;

                fw->text = text;

                for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
                }
        }

        /* Load the Data area. */
        offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
        if (fw->data) {
                int j;

                for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, fw->data[j]);
                }
        }

        /* Load the SBSS area. */
        offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
        if (fw->sbss) {
                int j;

                for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, fw->sbss[j]);
                }
        }

        /* Load the BSS area. */
        offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
        if (fw->bss) {
                int j;

                for (j = 0; j < (fw->bss_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, fw->bss[j]);
                }
        }

        /* Load the Read-Only area. */
        offset = cpu_reg->spad_base +
                (fw->rodata_addr - cpu_reg->mips_view_base);
        if (fw->rodata) {
                int j;

                for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, fw->rodata[j]);
                }
        }

        /* Clear the pre-fetch instruction. */
        REG_WR_IND(bp, cpu_reg->inst, 0);
        REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

        /* Start the CPU. */
        val = REG_RD_IND(bp, cpu_reg->mode);
        val &= ~cpu_reg->mode_value_halt;
        REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
        REG_WR_IND(bp, cpu_reg->mode, val);

        return 0;
}
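
/* Address translation in load_cpu_fw(), worked example (illustrative
 * numbers): the firmware sections are linked at MIPS addresses based
 * at cpu_reg->mips_view_base (0x8000000), while the host reaches the
 * same memory through the per-CPU scratchpad window at
 * cpu_reg->spad_base.  A section with text_addr 0x8000100 therefore
 * lands at spad_base + (0x8000100 - 0x8000000) = spad_base + 0x100.
 */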

static int
bnx2_init_cpus(struct bnx2 *bp)
{
        struct cpu_reg cpu_reg;
        struct fw_info *fw;
        int rc = 0;
        void *text;
        u32 text_len;

        if ((rc = bnx2_gunzip_init(bp)) != 0)
                return rc;

        /* Initialize the RV2P processor. */
        rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
                         &text_len);
        if (rc)
                goto init_cpu_err;

        load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

        rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
                         &text_len);
        if (rc)
                goto init_cpu_err;

        load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

        /* Initialize the RX Processor. */
        cpu_reg.mode = BNX2_RXP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_RXP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_RXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        fw = &bnx2_rxp_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Processor. */
        cpu_reg.mode = BNX2_TXP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_TXP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_TXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        fw = &bnx2_txp_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Patch-up Processor. */
        cpu_reg.mode = BNX2_TPAT_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_TPAT_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        fw = &bnx2_tpat_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Completion Processor. */
        cpu_reg.mode = BNX2_COM_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_COM_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_COM_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        fw = &bnx2_com_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

init_cpu_err:
        bnx2_gunzip_end(bp);
        return rc;
}

static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        u16 pmcsr;

        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0: {
                u32 val;

                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                        (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                        PCI_PM_CTRL_PME_STATUS);

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);

                val = REG_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                REG_WR(bp, BNX2_EMAC_MODE, val);

                val = REG_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                REG_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                int i;
                u32 val, wol_msg;

                if (bp->wol) {
                        u32 advertising;
                        u8 autoneg;

                        autoneg = bp->autoneg;
                        advertising = bp->advertising;

                        bp->autoneg = AUTONEG_SPEED;
                        bp->advertising = ADVERTISED_10baseT_Half |
                                ADVERTISED_10baseT_Full |
                                ADVERTISED_100baseT_Half |
                                ADVERTISED_100baseT_Full |
                                ADVERTISED_Autoneg;

                        bnx2_setup_copper_phy(bp);

                        bp->autoneg = autoneg;
                        bp->advertising = advertising;

                        bnx2_set_mac_addr(bp);

                        val = REG_RD(bp, BNX2_EMAC_MODE);

                        /* Enable port mode. */
                        val &= ~BNX2_EMAC_MODE_PORT;
                        val |= BNX2_EMAC_MODE_PORT_MII |
                               BNX2_EMAC_MODE_MPKT_RCVD |
                               BNX2_EMAC_MODE_ACPI_RCVD |
                               BNX2_EMAC_MODE_MPKT;

                        REG_WR(bp, BNX2_EMAC_MODE, val);

                        /* receive all multicast */
                        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                                REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                       0xffffffff);
                        }
                        REG_WR(bp, BNX2_EMAC_RX_MODE,
                               BNX2_EMAC_RX_MODE_SORT_MODE);

                        val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
                              BNX2_RPM_SORT_USER0_MC_EN;
                        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val |
                               BNX2_RPM_SORT_USER0_ENA);

                        /* Need to enable EMAC and RPM for WOL. */
                        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                        val = REG_RD(bp, BNX2_RPM_CONFIG);
                        val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                        REG_WR(bp, BNX2_RPM_CONFIG, val);

                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
                }
                else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
                }

                if (!(bp->flags & NO_WOL_FLAG))
                        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

                        if (bp->wol)
                                pmcsr |= 3;
                }
                else {
                        pmcsr |= 3;
                }
                if (bp->wol) {
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
                }
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                udelay(50);
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}

static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
        u32 val;
        int j;

        /* Request access to the flash interface. */
        REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                val = REG_RD(bp, BNX2_NVM_SW_ARB);
                if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
                        break;

                udelay(5);
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}

static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
        int j;
        u32 val;

        /* Relinquish nvram interface. */
        REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                val = REG_RD(bp, BNX2_NVM_SW_ARB);
                if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
                        break;

                udelay(5);
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}

static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

        if (!bp->flash_info->buffered) {
                int j;

                REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
                REG_WR(bp, BNX2_NVM_COMMAND,
                       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

                for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                        udelay(5);

                        val = REG_RD(bp, BNX2_NVM_COMMAND);
                        if (val & BNX2_NVM_COMMAND_DONE)
                                break;
                }

                if (j >= NVRAM_TIMEOUT_COUNT)
                        return -EBUSY;
        }
        return 0;
}

static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}

static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
        /* Enable both bits, even on read. */
        REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
               val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}

static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
        /* Disable both bits, even after read. */
        REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
               val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
                       BNX2_NVM_ACCESS_ENABLE_WR_EN));
}

static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
        u32 cmd;
        int j;

        if (bp->flash_info->buffered)
                /* Buffered flash, no erase needed */
                return 0;

        /* Build an erase command */
        cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
              BNX2_NVM_COMMAND_DOIT;

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the page to erase. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue an erase command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE)
                        break;
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}

static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
        u32 cmd;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

        /* Calculate an offset of a buffered flash. */
        if (bp->flash_info->buffered) {
                offset = ((offset / bp->flash_info->page_size) <<
                           bp->flash_info->page_bits) +
                          (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue a read command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE) {
                        val = REG_RD(bp, BNX2_NVM_READ);

                        val = be32_to_cpu(val);
                        memcpy(ret_val, &val, 4);
                        break;
                }
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
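
/* Buffered-flash addressing, worked example (illustrative numbers):
 * buffered parts take a page number in the high address bits rather
 * than a flat byte offset.  With page_size 264 and page_bits 9, byte
 * offset 1000 is page 3, byte 208 within the page, so the device
 * address becomes (3 << 9) + 208 = 1744.  The same translation is
 * applied in bnx2_nvram_write_dword() below.
 */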

static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
        u32 cmd, val32;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

        /* Calculate an offset of a buffered flash. */
        if (bp->flash_info->buffered) {
                offset = ((offset / bp->flash_info->page_size) <<
                          bp->flash_info->page_bits) +
                         (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        memcpy(&val32, val, 4);
        val32 = cpu_to_be32(val32);

        /* Write the data. */
        REG_WR(bp, BNX2_NVM_WRITE, val32);

        /* Address of the NVRAM to write to. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue the write command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                udelay(5);

                if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
                        break;
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}

static int
bnx2_init_nvram(struct bnx2 *bp)
{
        u32 val;
        int j, entry_count, rc;
        struct flash_spec *flash;

        /* Determine the selected interface. */
        val = REG_RD(bp, BNX2_NVM_CFG1);

        entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

        rc = 0;
        if (val & 0x40000000) {

                /* Flash interface has been reconfigured */
                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {
                        if ((val & FLASH_BACKUP_STRAP_MASK) ==
                            (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
                                bp->flash_info = flash;
                                break;
                        }
                }
        }
        else {
                u32 mask;
                /* Not yet reconfigured */

                if (val & (1 << 23))
                        mask = FLASH_BACKUP_STRAP_MASK;
                else
                        mask = FLASH_STRAP_MASK;

                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {

                        if ((val & mask) == (flash->strapping & mask)) {
                                bp->flash_info = flash;

                                /* Request access to the flash interface. */
                                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                                        return rc;

                                /* Enable access to flash interface */
                                bnx2_enable_nvram_access(bp);

                                /* Reconfigure the flash interface */
                                REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
                                REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
                                REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
                                REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

                                /* Disable access to flash interface */
                                bnx2_disable_nvram_access(bp);
                                bnx2_release_nvram_lock(bp);

                                break;
                        }
                }
        } /* if (val & 0x40000000) */

        if (j == entry_count) {
                bp->flash_info = NULL;
                printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
                return -ENODEV;
        }

        val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
        val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
        if (val)
                bp->flash_size = val;
        else
                bp->flash_size = bp->flash_info->total_size;

        return rc;
}

static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
                int buf_size)
{
        int rc = 0;
        u32 cmd_flags, offset32, len32, extra;

        if (buf_size == 0)
                return 0;

        /* Request access to the flash interface. */
        if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                return rc;

        /* Enable access to flash interface */
        bnx2_enable_nvram_access(bp);

        len32 = buf_size;
        offset32 = offset;
        extra = 0;

        cmd_flags = 0;

        if (offset32 & 3) {
                u8 buf[4];
                u32 pre_len;

                offset32 &= ~3;
                pre_len = 4 - (offset & 3);

                if (pre_len >= len32) {
                        pre_len = len32;
                        cmd_flags = BNX2_NVM_COMMAND_FIRST |
                                    BNX2_NVM_COMMAND_LAST;
                }
                else {
                        cmd_flags = BNX2_NVM_COMMAND_FIRST;
                }

                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

                if (rc)
                        return rc;

                memcpy(ret_buf, buf + (offset & 3), pre_len);

                offset32 += 4;
                ret_buf += pre_len;
                len32 -= pre_len;
        }
        if (len32 & 3) {
                extra = 4 - (len32 & 3);
                len32 = (len32 + 4) & ~3;
        }

        if (len32 == 4) {
                u8 buf[4];

                if (cmd_flags)
                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                else
                        cmd_flags = BNX2_NVM_COMMAND_FIRST |
                                    BNX2_NVM_COMMAND_LAST;

                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

                memcpy(ret_buf, buf, 4 - extra);
        }
        else if (len32 > 0) {
                u8 buf[4];

                /* Read the first word. */
                if (cmd_flags)
                        cmd_flags = 0;
                else
                        cmd_flags = BNX2_NVM_COMMAND_FIRST;

                rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

                /* Advance to the next dword. */
                offset32 += 4;
                ret_buf += 4;
                len32 -= 4;

                while (len32 > 4 && rc == 0) {
                        rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

                        /* Advance to the next dword. */
                        offset32 += 4;
                        ret_buf += 4;
                        len32 -= 4;
                }

                if (rc)
                        return rc;

                cmd_flags = BNX2_NVM_COMMAND_LAST;
                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

                memcpy(ret_buf, buf, 4 - extra);
        }

        /* Disable access to flash interface */
        bnx2_disable_nvram_access(bp);

        bnx2_release_nvram_lock(bp);

        return rc;
}

static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                int buf_size)
{
        u32 written, offset32, len32;
        u8 *buf, start[4], end[4], *flash_buffer = NULL;
        int rc = 0;
        int align_start, align_end;

        buf = data_buf;
        offset32 = offset;
        len32 = buf_size;
        align_start = align_end = 0;

        if ((align_start = (offset32 & 3))) {
                offset32 &= ~3;
                len32 += align_start;
                if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
                        return rc;
        }

        if (len32 & 3) {
                if ((len32 > 4) || !align_start) {
                        align_end = 4 - (len32 & 3);
                        len32 += align_end;
                        if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
                                end, 4))) {
                                return rc;
                        }
                }
        }

        if (align_start || align_end) {
                buf = kmalloc(len32, GFP_KERNEL);
                if (buf == NULL)
                        return -ENOMEM;
                if (align_start) {
                        memcpy(buf, start, 4);
                }
                if (align_end) {
                        memcpy(buf + len32 - 4, end, 4);
                }
                memcpy(buf + align_start, data_buf, buf_size);
        }

        if (bp->flash_info->buffered == 0) {
                flash_buffer = kmalloc(264, GFP_KERNEL);
                if (flash_buffer == NULL) {
                        rc = -ENOMEM;
                        goto nvram_write_end;
                }
        }

        written = 0;
        while ((written < len32) && (rc == 0)) {
                u32 page_start, page_end, data_start, data_end;
                u32 addr, cmd_flags;
                int i;

                /* Find the page_start addr */
                page_start = offset32 + written;
                page_start -= (page_start % bp->flash_info->page_size);
                /* Find the page_end addr */
                page_end = page_start + bp->flash_info->page_size;
                /* Find the data_start addr */
                data_start = (written == 0) ? offset32 : page_start;
                /* Find the data_end addr */
                data_end = (page_end > offset32 + len32) ?
                        (offset32 + len32) : page_end;

                /* Request access to the flash interface. */
                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                        goto nvram_write_end;

                /* Enable access to flash interface */
                bnx2_enable_nvram_access(bp);

                cmd_flags = BNX2_NVM_COMMAND_FIRST;
                if (bp->flash_info->buffered == 0) {
                        int j;

                        /* Read the whole page into the buffer
                         * (non-buffer flash only) */
                        for (j = 0; j < bp->flash_info->page_size; j += 4) {
                                if (j == (bp->flash_info->page_size - 4)) {
                                        cmd_flags |= BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_read_dword(bp,
                                        page_start + j,
                                        &flash_buffer[j],
                                        cmd_flags);

                                if (rc)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Enable writes to flash interface (unlock write-protect) */
                if ((rc = bnx2_enable_nvram_write(bp)) != 0)
                        goto nvram_write_end;

                /* Erase the page */
                if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
                        goto nvram_write_end;

                /* Re-enable the write again for the actual write */
                bnx2_enable_nvram_write(bp);

                /* Loop to write back the buffer data from page_start to
                 * data_start */
                i = 0;
                if (bp->flash_info->buffered == 0) {
                        for (addr = page_start; addr < data_start;
                                addr += 4, i += 4) {

                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Loop to write the new data from data_start to data_end */
                for (addr = data_start; addr < data_end; addr += 4, i += 4) {
                        if ((addr == page_end - 4) ||
                                ((bp->flash_info->buffered) &&
                                 (addr == data_end - 4))) {

                                cmd_flags |= BNX2_NVM_COMMAND_LAST;
                        }
                        rc = bnx2_nvram_write_dword(bp, addr, buf,
                                cmd_flags);

                        if (rc != 0)
                                goto nvram_write_end;

                        cmd_flags = 0;
                        buf += 4;
                }

                /* Loop to write back the buffer data from data_end
                 * to page_end */
                if (bp->flash_info->buffered == 0) {
                        for (addr = data_end; addr < page_end;
                                addr += 4, i += 4) {

                                if (addr == page_end - 4) {
                                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Disable writes to flash interface (lock write-protect) */
                bnx2_disable_nvram_write(bp);

                /* Disable access to flash interface */
                bnx2_disable_nvram_access(bp);
                bnx2_release_nvram_lock(bp);

                /* Increment written */
                written += data_end - data_start;
        }

nvram_write_end:
        if (bp->flash_info->buffered == 0)
                kfree(flash_buffer);

        if (align_start || align_end)
                kfree(buf);
        return rc;
}
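
/* Summary of the non-buffered write path above: since these parts only
 * erase in whole pages, each affected page is read in full into
 * flash_buffer, the page is erased, and then the preserved leading
 * bytes, the new data, and the preserved trailing bytes are written
 * back in order.  Buffered parts skip the read and erase steps
 * entirely.
 */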

static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
        u32 val;
        int i, rc = 0;

        /* Wait for the current PCI transaction to complete before
         * issuing a reset. */
        REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
               BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
        val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
        udelay(5);

        /* Wait for the firmware to tell us it is ok to issue a reset. */
        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

        /* Deposit a driver reset signature so the firmware knows that
         * this is a soft reset. */
        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
                   BNX2_DRV_RESET_SIGNATURE_MAGIC);

        /* Do a dummy read to force the chip to complete all current
         * transactions before we issue a reset. */
        val = REG_RD(bp, BNX2_MISC_ID);

        val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
              BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
              BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

        /* Chip reset. */
        REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

        if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5706_A1))
                msleep(15);

        /* Reset takes approximately 30 usec */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
                if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
                        break;
                }
                udelay(10);
        }

        if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
                printk(KERN_ERR PFX "Chip reset did not complete\n");
                return -EBUSY;
        }

        /* Make sure byte swapping is properly configured. */
        val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
        if (val != 0x01020304) {
                printk(KERN_ERR PFX "Chip not in correct endian mode\n");
                return -ENODEV;
        }

        /* Wait for the firmware to finish its initialization. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
        if (rc)
                return rc;

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                /* Adjust the voltage regulator to two steps lower.  The
                 * default of this register is 0x0000000e. */
                REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

                /* Remove bad rbuf memory from the free pool. */
                rc = bnx2_alloc_bad_rbuf(bp);
        }

        return rc;
}

static int
bnx2_init_chip(struct bnx2 *bp)
{
        u32 val;
        int rc;

        /* Make sure the interrupt is not active. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
              BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
              BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
              BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
              DMA_READ_CHANS << 12 |
              DMA_WRITE_CHANS << 16;

        val |= (0x2 << 20) | (1 << 11);

        if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
                val |= (1 << 23);

        if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
            (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
                val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

        REG_WR(bp, BNX2_DMA_CONFIG, val);

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                val = REG_RD(bp, BNX2_TDMA_CONFIG);
                val |= BNX2_TDMA_CONFIG_ONE_DMA;
                REG_WR(bp, BNX2_TDMA_CONFIG, val);
        }

        if (bp->flags & PCIX_FLAG) {
                u16 val16;

                pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                     &val16);
                pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                      val16 & ~PCI_X_CMD_ERO);
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
               BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

        /* Initialize context mapping and zero out the quick contexts.  The
         * context block must have already been enabled. */
        bnx2_init_context(bp);

        if ((rc = bnx2_init_cpus(bp)) != 0)
                return rc;

        bnx2_init_nvram(bp);

        bnx2_set_mac_addr(bp);

        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
        REG_WR(bp, BNX2_MQ_CONFIG, val);

        val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
        REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
        REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

        val = (BCM_PAGE_BITS - 8) << 24;
        REG_WR(bp, BNX2_RV2P_CONFIG, val);

        /* Configure page size. */
        val = REG_RD(bp, BNX2_TBDR_CONFIG);
        val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
        val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
        REG_WR(bp, BNX2_TBDR_CONFIG, val);

        val = bp->mac_addr[0] +
              (bp->mac_addr[1] << 8) +
              (bp->mac_addr[2] << 16) +
              bp->mac_addr[3] +
              (bp->mac_addr[4] << 8) +
              (bp->mac_addr[5] << 16);
        REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

        /* Program the MTU.  Also include 4 bytes for CRC32. */
        val = bp->dev->mtu + ETH_HLEN + 4;
        if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
                val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
        REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

        bp->last_status_idx = 0;
        bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

        /* Set up how to generate a link change interrupt. */
        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
               (u64) bp->status_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
               (u64) bp->stats_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
               (u64) bp->stats_blk_mapping >> 32);

        REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
               (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
               (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
               (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

        REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

        REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

        REG_WR(bp, BNX2_HC_COM_TICKS,
               (bp->com_ticks_int << 16) | bp->com_ticks);

        REG_WR(bp, BNX2_HC_CMD_TICKS,
               (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

        REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
        REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

        if (CHIP_ID(bp) == CHIP_ID_5706_A1)
                REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
        else {
                REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
                       BNX2_HC_CONFIG_TX_TMR_MODE |
                       BNX2_HC_CONFIG_COLLECT_STATS);
        }

        /* Clear internal stats counters. */
        REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

        REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

        if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
            BNX2_PORT_FEATURE_ASF_ENABLED)
                bp->flags |= ASF_ENABLE_FLAG;

        /* Initialize the receive filter. */
        bnx2_set_rx_mode(bp->dev);

        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
                          0);

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
        REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

        udelay(20);

        bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

        return rc;
}

static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
        struct tx_bd *txbd;
        u32 val;

        bp->tx_wake_thresh = bp->tx_ring_size / 2;

        txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

        txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

        bp->tx_prod = 0;
        bp->tx_cons = 0;
        bp->hw_tx_cons = 0;
        bp->tx_prod_bseq = 0;

        val = BNX2_L2CTX_TYPE_TYPE_L2;
        val |= BNX2_L2CTX_TYPE_SIZE_L2;
        CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

        val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
        val |= 8 << 16;
        CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

        val = (u64) bp->tx_desc_mapping >> 32;
        CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

        val = (u64) bp->tx_desc_mapping & 0xffffffff;
        CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
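
/* The TX ring occupies a single page; the extra descriptor at index
 * MAX_TX_DESC_CNT written above is not a data BD but a chain BD whose
 * host address points back to the start of the same page, turning the
 * page into a circular ring from the hardware's point of view.
 */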

static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
        struct rx_bd *rxbd;
        int i;
        u16 prod, ring_prod;
        u32 val;

        /* 8 for CRC and VLAN */
        bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
        /* 8 for alignment */
        bp->rx_buf_size = bp->rx_buf_use_size + 8;

        ring_prod = prod = bp->rx_prod = 0;
        bp->rx_cons = 0;
        bp->hw_rx_cons = 0;
        bp->rx_prod_bseq = 0;

        for (i = 0; i < bp->rx_max_ring; i++) {
                int j;

                rxbd = &bp->rx_desc_ring[i][0];
                for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
                        rxbd->rx_bd_len = bp->rx_buf_use_size;
                        rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
                }
                if (i == (bp->rx_max_ring - 1))
                        j = 0;
                else
                        j = i + 1;
                rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
                rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
                                       0xffffffff;
        }

        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        val |= 0x02 << 8;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

        val = (u64) bp->rx_desc_mapping[0] >> 32;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
                        break;
                }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        bp->rx_prod = prod;

        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}

static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
        u32 num_rings, max;

        bp->rx_ring_size = size;
        num_rings = 1;
        while (size > MAX_RX_DESC_CNT) {
                size -= MAX_RX_DESC_CNT;
                num_rings++;
        }
        /* round to next power of 2 */
        max = MAX_RX_RINGS;
        while ((max & num_rings) == 0)
                max >>= 1;

        if (num_rings != max)
                max <<= 1;

        bp->rx_max_ring = max;
        bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
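
/* Worked example (illustrative, assuming MAX_RX_DESC_CNT == 255,
 * RX_DESC_CNT == 256 and MAX_RX_RINGS == 4): a requested size of 600
 * needs num_rings = 3 (600 -> 345 -> 90).  The power-of-2 rounding
 * shifts max down while (max & num_rings) == 0 (4 & 3 == 0 -> max = 2),
 * and since 2 != 3 it doubles back to 4, so rx_max_ring = 4 and
 * rx_max_ring_idx = 4 * 256 - 1 = 1023.
 */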

static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
        int i;

        if (bp->tx_buf_ring == NULL)
                return;

        for (i = 0; i < TX_DESC_CNT; ) {
                struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
                struct sk_buff *skb = tx_buf->skb;
                int j, last;

                if (skb == NULL) {
                        i++;
                        continue;
                }

                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;

                last = skb_shinfo(skb)->nr_frags;
                for (j = 0; j < last; j++) {
                        tx_buf = &bp->tx_buf_ring[i + j + 1];
                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(tx_buf, mapping),
                                skb_shinfo(skb)->frags[j].size,
                                PCI_DMA_TODEVICE);
                }
                dev_kfree_skb(skb);
                i += j + 1;
        }
}

static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
        int i;

        if (bp->rx_buf_ring == NULL)
                return;

        for (i = 0; i < bp->rx_max_ring_idx; i++) {
                struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
                struct sk_buff *skb = rx_buf->skb;

                if (skb == NULL)
                        continue;

                pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                        bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

                rx_buf->skb = NULL;

                dev_kfree_skb(skb);
        }
}

static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}

static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
        int rc;

        rc = bnx2_reset_chip(bp, reset_code);
        bnx2_free_skbs(bp);
        if (rc)
                return rc;

        if ((rc = bnx2_init_chip(bp)) != 0)
                return rc;

        bnx2_init_tx_ring(bp);
        bnx2_init_rx_ring(bp);
        return 0;
}

static int
bnx2_init_nic(struct bnx2 *bp)
{
        int rc;

        if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
                return rc;

        spin_lock_bh(&bp->phy_lock);
        bnx2_init_phy(bp);
        spin_unlock_bh(&bp->phy_lock);
        bnx2_set_link(bp);
        return 0;
}

static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i;
        static const struct {
                u16   offset;
                u16   flags;
                u32   rw_mask;
                u32   ro_mask;
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, 0, 0x00003f00, 0x00000000 },
                { 0x0418, 0, 0x00000000, 0xffffffff },
                { 0x041c, 0, 0x00000000, 0xffffffff },
                { 0x0420, 0, 0x00000000, 0x80ffffff },
                { 0x0424, 0, 0x00000000, 0x00000000 },
                { 0x0428, 0, 0x00000000, 0x00000001 },
                { 0x0450, 0, 0x00000000, 0x0000ffff },
                { 0x0454, 0, 0x00000000, 0xffffffff },
                { 0x0458, 0, 0x00000000, 0xffffffff },

                { 0x0808, 0, 0x00000000, 0xffffffff },
                { 0x0854, 0, 0x00000000, 0xffffffff },
                { 0x0868, 0, 0x00000000, 0x77777777 },
                { 0x086c, 0, 0x00000000, 0x77777777 },
                { 0x0870, 0, 0x00000000, 0x77777777 },
                { 0x0874, 0, 0x00000000, 0x77777777 },

                { 0x0c00, 0, 0x00000000, 0x00000001 },
                { 0x0c04, 0, 0x00000000, 0x03ff0001 },
                { 0x0c08, 0, 0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, 0, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },
                { 0x500c, 0, 0xf800f800, 0x07ff07ff },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        ret = 0;
        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                save_val = readl(bp->regview + offset);

                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
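
/* The register test above relies on the rw_mask/ro_mask pairs in
 * reg_tbl: bits in rw_mask must read back exactly as written (checked
 * with both all-zeros and all-ones patterns), while bits in ro_mask
 * must keep their original value regardless of what was written.  The
 * saved value is restored even on failure so the chip state is left
 * intact.
 */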

static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
        static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
                0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
        int i;

        for (i = 0; i < sizeof(test_pattern) / 4; i++) {
                u32 offset;

                for (offset = 0; offset < size; offset += 4) {

                        REG_WR_IND(bp, start + offset, test_pattern[i]);

                        if (REG_RD_IND(bp, start + offset) !=
                                test_pattern[i]) {
                                return -ENODEV;
                        }
                }
        }
        return 0;
}

static int
bnx2_test_memory(struct bnx2 *bp)
{
        int ret = 0;
        int i;
        static const struct {
                u32   offset;
                u32   len;
        } mem_tbl[] = {
                { 0x60000,  0x4000 },
                { 0xa0000,  0x3000 },
                { 0xe0000,  0x4000 },
                { 0x120000, 0x4000 },
                { 0x1a0000, 0x4000 },
                { 0x160000, 0x4000 },
                { 0xffffffff, 0    },
        };

        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
                if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
                        mem_tbl[i].len)) != 0) {
                        return ret;
                }
        }

        return ret;
}

#define BNX2_MAC_LOOPBACK        0
#define BNX2_PHY_LOOPBACK        1

static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;

        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        pkt_size = 1514;
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->mac_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);

        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

        num_pkts = 0;

        txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
        bp->tx_prod_bseq += pkt_size;

        REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
        REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

        udelay(100);

        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
                goto loopback_test_done;
        }

        rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        rx_buf = &bp->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, bp->rx_offset);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}

#define BNX2_MAC_LOOPBACK_FAILED        1
#define BNX2_PHY_LOOPBACK_FAILED        2
#define BNX2_LOOPBACK_FAILED                (BNX2_MAC_LOOPBACK_FAILED | \
                                         BNX2_PHY_LOOPBACK_FAILED)

static int
bnx2_test_loopback(struct bnx2 *bp)
{
        int rc = 0;

        if (!netif_running(bp->dev))
                return BNX2_LOOPBACK_FAILED;

        bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
        spin_lock_bh(&bp->phy_lock);
        bnx2_init_phy(bp);
        spin_unlock_bh(&bp->phy_lock);
        if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
                rc |= BNX2_MAC_LOOPBACK_FAILED;
        if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
                rc |= BNX2_PHY_LOOPBACK_FAILED;
        return rc;
}

#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3

static int
bnx2_test_nvram(struct bnx2 *bp)
{
        u32 buf[NVRAM_SIZE / 4];
        u8 *data = (u8 *) buf;
        int rc = 0;
        u32 magic, csum;

        if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
                goto test_nvram_done;

        magic = be32_to_cpu(buf[0]);
        if (magic != 0x669955aa) {
                rc = -ENODEV;
                goto test_nvram_done;
        }

        if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
                goto test_nvram_done;

        csum = ether_crc_le(0x100, data);
        if (csum != CRC32_RESIDUAL) {
                rc = -ENODEV;
                goto test_nvram_done;
        }

        csum = ether_crc_le(0x100, data + 0x100);
        if (csum != CRC32_RESIDUAL) {
                rc = -ENODEV;
        }

test_nvram_done:
        return rc;
}
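
/* CRC residual check: ether_crc_le() is run over each 0x100-byte half
 * including its stored CRC32.  A property of CRC32 is that the CRC of
 * a block with its own CRC appended is the constant residual
 * 0xdebb20e3, so the stored checksum never has to be extracted and
 * compared by hand here.
 */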

static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	spin_lock_bh(&bp->phy_lock);
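	/* BMSR latches link-down events, so read it twice: the second
	 * read reflects the current link state.
	 */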
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}

static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

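	/* The COAL_NOW command above forces an immediate status block
	 * update and interrupt.  Poll for up to ~100 ms for the status
	 * index to advance; if it never does, the interrupt path is
	 * broken.
	 */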
	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}

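/* 5706 SerDes parallel detection: if the link partner does not
 * autonegotiate, force 1 Gbps full duplex once a signal is detected
 * with no CONFIG exchange in progress; re-enable autoneg as soon as
 * the partner starts sending CONFIG codewords again.
 */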
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
			    !(phy2 & 0x20)) {	/* no CONFIG */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}

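/* 5708 SerDes: on 2.5G-capable parts, alternate between autoneg and a
 * forced 2.5 Gbps link while the link stays down, since the forced
 * speed cannot be reached through normal negotiation.
 */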
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}

static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

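	/* Heartbeat: write an incrementing sequence number to the driver
	 * pulse mailbox so the bootcode firmware can tell the driver is
	 * still alive.
	 */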
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
	    !disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}

static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}

static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}

/* Called with rtnl_lock */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif

/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#ifdef BCM_TSO
	if ((mss = skb_shinfo(skb)->gso_size) &&
	    (skb->len > (bp->dev->mtu + ETH_HLEN))) {
		u32 tcp_opt_len, ip_tcp_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = 0;
		if (skb->h.th->doff > 5) {
			tcp_opt_len = (skb->h.th->doff - 5) << 2;
		}
		ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);

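		/* Prepare the headers for hardware LSO: clear the IP
		 * checksum and seed the TCP checksum with the
		 * pseudo-header sum (length omitted) so the chip can
		 * finish the checksum of each segment it cuts.
		 */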
		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		skb->h.th->check =
			~csum_tcpudp_magic(skb->nh.iph->saddr,
					   skb->nh.iph->daddr,
					   0, IPPROTO_TCP, 0);

		if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
			vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2)) << 8;
		}
	}
	else
#endif
	{
		mss = 0;
	}

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

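	/* Stop the queue when the ring is nearly full, then re-check:
	 * bnx2_tx_int() may have freed descriptors after the check but
	 * before the stop, so wake the queue again to avoid a lost
	 * wakeup.
	 */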
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}

/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}

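/* The chip exports each 64-bit counter as a _hi/_lo pair of 32-bit
 * words.  On 64-bit kernels both halves are combined; on 32-bit
 * kernels only the low word is reported, since unsigned long cannot
 * hold the full value.
 */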
#define GET_NET_STATS64(ctr)					\
	(unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	(unsigned long) (ctr##_lo)

#define GET_NET_STATS32(ctr)	\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif

static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}

/* All ethtool functions called with rtnl_lock */

static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;

		cmd->port = PORT_FIBRE;
	}
	else {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

		cmd->port = PORT_TP;
	}

	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}

static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
		    (cmd->advertising == ADVERTISED_10baseT_Full) ||
		    (cmd->advertising == ADVERTISED_100baseT_Half) ||
		    (cmd->advertising == ADVERTISED_100baseT_Full)) {

			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			return -EINVAL;
		}
		else {
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
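	/* bp->fw_ver packs one bootcode version component per byte;
	 * formatting each byte with + '0' assumes every component is a
	 * single decimal digit.
	 */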
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}

#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}

static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

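	/* reg_boundaries[] holds consecutive (start, end) pairs of
	 * readable register ranges; the gaps between ranges are left
	 * zeroed in the dump.
	 */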
	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}

static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
	return 0;
}

static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}

static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}

static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}

static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
	    (ering->tx_pending > MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}

static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
}

#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
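/* Offsets are in 32-bit words into struct statistics_block.  64-bit
 * counters are referenced by their _hi word; the matching _lo word
 * immediately follows it in the block.
 */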

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
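/* Per-counter widths in bytes: 8 = 64-bit counter, 4 = 32-bit
 * counter, 0 = counter not reported on this chip revision.
 */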
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}

static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}

static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}

static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				   bnx2_stats_offset_arr[i])) << 32) +
			 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}

static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}

static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};

/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);

		bnx2_init_nic(bp);

		bnx2_netif_start(bp);
	}
	return 0;
}

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
	if (bp->pcix_cap == 0) {
		dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(17);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		rc = -EPERM;
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG)
		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
	else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it
	 * using pci_msi_quirk.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
#ifdef BCM_TSO
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
#endif

	netif_carrier_off(bp->dev);

	return 0;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);